err.no Git - linux-2.6/commitdiff
Merge branch 'upstream-fixes' into upstream
author    Jeff Garzik <jeff@garzik.org>
          Sat, 20 May 2006 04:36:08 +0000 (00:36 -0400)
committer Jeff Garzik <jeff@garzik.org>
          Sat, 20 May 2006 04:36:08 +0000 (00:36 -0400)
Conflicts:

drivers/scsi/libata-core.c

143 files changed:
CREDITS
Documentation/devices.txt
Documentation/feature-removal-schedule.txt
Documentation/memory-barriers.txt
Documentation/spi/pxa2xx [new file with mode: 0644]
Documentation/spi/spi-summary
Documentation/watchdog/watchdog-api.txt
MAINTAINERS
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/dma-isa.c
arch/arm/kernel/process.c
arch/arm/lib/backtrace.S
arch/arm/lib/div64.S
arch/arm/mach-pxa/mainstone.c
arch/arm/mach-realview/realview_eb.c
arch/arm/mach-s3c2410/sleep.S
arch/arm/mm/ioremap.c
arch/i386/Kconfig
arch/i386/kernel/acpi/boot.c
arch/i386/oprofile/nmi_int.c
arch/ia64/configs/sn2_defconfig
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/platforms/pseries/setup.c
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/syscalls.S
arch/s390/kernel/time.c
arch/x86_64/kernel/pci-nommu.c
arch/x86_64/kernel/traps.c
arch/x86_64/mm/srat.c
drivers/char/Kconfig
drivers/char/rio/host.h
drivers/char/rio/rioboot.c
drivers/char/rio/rioctrl.c
drivers/char/rio/rioioctl.h
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm_tis.c
drivers/char/watchdog/i8xx_tco.c
drivers/char/watchdog/s3c2410_wdt.c
drivers/char/watchdog/sc1200wdt.c
drivers/ide/legacy/ide-cs.c
drivers/ieee1394/ohci1394.c
drivers/ieee1394/sbp2.c
drivers/ieee1394/sbp2.h
drivers/infiniband/core/uverbs_mem.c
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/isdn/capi/capi.c
drivers/isdn/gigaset/usb-gigaset.c
drivers/leds/Kconfig
drivers/leds/led-class.c
drivers/leds/ledtrig-timer.c
drivers/mmc/au1xmmc.c
drivers/mmc/imxmmc.c
drivers/mmc/mmc.c
drivers/mmc/mmc_block.c
drivers/mmc/pxamci.c
drivers/mmc/wbsd.c
drivers/net/b44.c
drivers/net/dl2k.c
drivers/net/ixp2000/enp2611.c
drivers/net/ixp2000/pm3386.c
drivers/net/ixp2000/pm3386.h
drivers/net/sky2.c
drivers/pci/quirks.c
drivers/pcmcia/pcmcia_ioctl.c
drivers/s390/net/lcs.c
drivers/scsi/libata-core.c
drivers/scsi/sata_mv.c
drivers/serial/serial_core.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/pxa2xx_spi.c [new file with mode: 0644]
drivers/spi/spi.c
drivers/spi/spi_bitbang.c
drivers/video/backlight/backlight.c
drivers/video/backlight/lcd.c
fs/9p/fcall.c
fs/9p/mux.c
fs/9p/mux.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/Makefile
fs/autofs4/autofs_i.h
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/compat.c
fs/configfs/dir.c
fs/jffs2/nodelist.c
fs/namespace.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/journal.c
fs/ocfs2/uptodate.c
fs/ocfs2/vote.c
fs/open.c
fs/smbfs/dir.c
fs/smbfs/request.c
include/asm-arm/arch-pxa/pxa2xx_spi.h [new file with mode: 0644]
include/asm-arm/procinfo.h
include/asm-arm/spinlock.h
include/asm-s390/unistd.h
include/linux/kernel.h
include/linux/mmc/mmc.h
include/linux/rcupdate.h
include/linux/slab.h
include/linux/spi/spi.h
include/linux/spi/spi_bitbang.h
include/linux/swap.h
include/net/sctp/command.h
include/net/sctp/sctp.h
init/do_mounts.c
init/initramfs.c
kernel/extable.c
kernel/module.c
kernel/rcupdate.c
mm/page_alloc.c
mm/slab.c
mm/sparse.c
net/802/tr.c
net/bridge/netfilter/ebt_log.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_nat_proto_gre.c
net/ipv4/netfilter/ipt_LOG.c
net/ipv4/netfilter/ipt_recent.c
net/ipv4/tcp_input.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_LOG.c
net/ipv6/netfilter/ip6t_eui64.c
net/ipx/af_ipx.c
net/ipx/ipx_route.c
net/netfilter/nfnetlink_log.c
net/sched/sch_generic.c
net/sctp/input.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
security/selinux/ss/services.c

diff --git a/CREDITS b/CREDITS
index 6f50be37fa0f81dbcccd5929b0c92c4f59775dd6..9bf714a1c7d97e92a634161c3538ee93e30f07d3 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3241,14 +3241,9 @@ S: 12725 SW Millikan Way, Suite 400
 S: Beaverton, Oregon 97005
 S: USA
 
-N: Marcelo W. Tosatti
-E: marcelo.tosatti@cyclades.com
-D: Miscellaneous kernel hacker
+N: Marcelo Tosatti
+E: marcelo@kvack.org
 D: v2.4 kernel maintainer
-D: Current pc300/cyclades maintainer
-S: Cyclades Corporation
-S: Av Cristovao Colombo, 462. Floresta.
-S: Porto Alegre
 S: Brazil
 
 N: Stefan Traby
index 3c406acd4dfae8c65e4f88ab5552416972c98276..b369a8c46a7311c8d5561d730a95a6049cee2ff2 100644 (file)
@@ -1721,11 +1721,6 @@ Your cooperation is appreciated.
                These devices support the same API as the generic SCSI
                devices.
 
- 97 block      Packet writing for CD/DVD devices
-                 0 = /dev/pktcdvd0     First packet-writing module
-                 1 = /dev/pktcdvd1     Second packet-writing module
-                   ...
-
  98 char       Control and Measurement Device (comedi)
                  0 = /dev/comedi0      First comedi device
                  1 = /dev/comedi1      Second comedi device
index 421bcfff6ad21be680fcf819243ca9d750fa8ab2..43ab119963d5ef706f1f5c602526a4a9caf28ae9 100644 (file)
@@ -57,6 +57,15 @@ Who: Jody McIntyre <scjody@steamballoon.com>
 
 ---------------------------
 
+What:  sbp2: module parameter "force_inquiry_hack"
+When:  July 2006
+Why:   Superseded by parameter "workarounds". Both parameters are meant to be
+       used ad-hoc and for single devices only, i.e. not in modprobe.conf,
+       therefore the impact of this feature replacement should be low.
+Who:   Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
 What:  Video4Linux API 1 ioctls and video_decoder.h from Video devices.
 When:  July 2006
 Why:   V4L1 API was replaced by the V4L2 API during migration from 2.4 to 2.6
index 92f0056d928c1f5ff485e250a93a843bb60058bc..c61d8b876fdbcba484a3ad149ac3d057dfb76bba 100644 (file)
@@ -1031,7 +1031,7 @@ conflict on any particular lock.
 LOCKS VS MEMORY ACCESSES
 ------------------------
 
-Consider the following: the system has a pair of spinlocks (N) and (Q), and
+Consider the following: the system has a pair of spinlocks (M) and (Q), and
 three CPUs; then should the following sequence of events occur:
 
        CPU 1                           CPU 2
@@ -1678,7 +1678,7 @@ CPU's caches by some other cache event:
        smp_wmb();
        <A:modify v=2>  <C:busy>
                        <C:queue v=2>
-       p = &b;         q = p;
+       p = &v;         q = p;
                        <D:request p>
        <B:modify p=&v> <D:commit p=&v>
                        <D:read p>
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
new file mode 100644 (file)
index 0000000..9c45f3d
--- /dev/null
@@ -0,0 +1,234 @@
+PXA2xx SPI on SSP driver HOWTO
+===================================================
+This is a mini HOWTO on the pxa2xx_spi driver.  The driver turns a PXA2xx
+synchronous serial port into a SPI master controller
+(see Documentation/spi/spi-summary).  The driver has the following features:
+
+- Support for any PXA2xx SSP
+- SSP PIO and SSP DMA data transfers.
+- External and Internal (SSPFRM) chip selects.
+- Per slave device (chip) configuration.
+- Full suspend, freeze, resume support.
+
+The driver is built around a "spi_message" fifo serviced by a workqueue and a
+tasklet.  The workqueue, "pump_messages", drives the message fifo and the
+tasklet (pump_transfer) is responsible for queuing SPI transactions and for
+setting up and launching the dma/interrupt driven transfers.
+
+Declaring PXA2xx Master Controllers
+-----------------------------------
+Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
+"platform device".  The master configuration is passed to the driver via a table
+found in include/asm-arm/arch-pxa/pxa2xx_spi.h:
+
+struct pxa2xx_spi_master {
+       enum pxa_ssp_type ssp_type;
+       u32 clock_enable;
+       u16 num_chipselect;
+       u8 enable_dma;
+};
+
+The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
+informs the driver which features a particular SSP supports.
+
+The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
+corresponding SSP peripheral block in the "Clock Enable Register (CKEN"). See
+the "PXA2xx Developer Manual" section "Clocks and Power Management".
+
+The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
+slave device (chips) attached to this SPI master.
+
+The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
+be used.  This caused the driver to acquire two DMA channels: rx_channel and
+tx_channel.  The rx_channel has a higher DMA service priority the tx_channel.
+See the "PXA2xx Developer Manual" section "DMA Controller".
+
+NSSP MASTER SAMPLE
+------------------
+Below is a sample configuration using the PXA255 NSSP.
+
+static struct resource pxa_spi_nssp_resources[] = {
+       [0] = {
+               .start  = __PREG(SSCR0_P(2)), /* Start address of NSSP */
+               .end    = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = IRQ_NSSP, /* NSSP IRQ */
+               .end    = IRQ_NSSP,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct pxa2xx_spi_master pxa_nssp_master_info = {
+       .ssp_type = PXA25x_NSSP, /* Type of SSP */
+       .clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */
+       .num_chipselect = 1, /* Matches the number of chips attached to NSSP */
+       .enable_dma = 1, /* Enables NSSP DMA */
+};
+
+static struct platform_device pxa_spi_nssp = {
+       .name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */
+       .id = 2, /* Bus number, MUST MATCH SSP number 1..n */
+       .resource = pxa_spi_nssp_resources,
+       .num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
+       .dev = {
+               .platform_data = &pxa_nssp_master_info, /* Passed to driver */
+       },
+};
+
+static struct platform_device *devices[] __initdata = {
+       &pxa_spi_nssp,
+};
+
+static void __init board_init(void)
+{
+       (void)platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+Declaring Slave Devices
+-----------------------
+Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
+using the "spi_board_info" structure found in "linux/spi/spi.h". See
+"Documentation/spi/spi_summary" for additional information.
+
+Each slave device attached to the PXA must provide slave specific configuration
+information via the structure "pxa2xx_spi_chip" found in
+"include/asm-arm/arch-pxa/pxa2xx_spi.h".  The pxa2xx_spi master controller
+driver will use this configuration whenever the driver communicates with the
+slave device.
+
+struct pxa2xx_spi_chip {
+       u8 tx_threshold;
+       u8 rx_threshold;
+       u8 dma_burst_size;
+       u32 timeout_microsecs;
+       u8 enable_loopback;
+       void (*cs_control)(u32 command);
+};
+
+The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
+used to configure the SSP hardware fifo.  These fields are critical to the
+performance of the pxa2xx_spi driver and misconfiguration will result in rx
+fifo overruns (especially in PIO mode transfers). Good default values are
+
+       .tx_threshold = 12,
+       .rx_threshold = 4,
+
+The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA
+engine and is related the "spi_device.bits_per_word" field.  Read and understand
+the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
+to determine the correct value. An SSP configured for byte-wide transfers would
+use a value of 8.
+
+The "pxa2xx_spi_chip.timeout_microsecs" fields is used to efficiently handle
+trailing bytes in the SSP receiver fifo.  The correct value for this field is
+dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
+slave device.  Please note the the PXA2xx SSP 1 does not support trailing byte
+timeouts and must busy-wait any trailing bytes.
+
+The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP porting
+into internal loopback mode.  In this mode the SSP controller internally
+connects the SSPTX pin the the SSPRX pin.  This is useful for initial setup
+testing.
+
+The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
+function for asserting/deasserting a slave device chip select.  If the field is
+NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
+configured to use SSPFRM instead.
+
+NSSP SLAVE SAMPLE
+-----------------
+The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
+"spi_board_info.controller_data" field. Below is a sample configuration using
+the PXA255 NSSP.
+
+/* Chip Select control for the CS8415A SPI slave device */
+static void cs8415a_cs_control(u32 command)
+{
+       if (command & PXA2XX_CS_ASSERT)
+               GPCR(2) = GPIO_bit(2);
+       else
+               GPSR(2) = GPIO_bit(2);
+}
+
+/* Chip Select control for the CS8405A SPI slave device */
+static void cs8405a_cs_control(u32 command)
+{
+       if (command & PXA2XX_CS_ASSERT)
+               GPCR(3) = GPIO_bit(3);
+       else
+               GPSR(3) = GPIO_bit(3);
+}
+
+static struct pxa2xx_spi_chip cs8415a_chip_info = {
+       .tx_threshold = 12, /* SSP hardware FIFO threshold */
+       .rx_threshold = 4, /* SSP hardware FIFO threshold */
+       .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
+       .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
+       .cs_control = cs8415a_cs_control, /* Use external chip select */
+};
+
+static struct pxa2xx_spi_chip cs8405a_chip_info = {
+       .tx_threshold = 12, /* SSP hardware FIFO threshold */
+       .rx_threshold = 4, /* SSP hardware FIFO threshold */
+       .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
+       .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
+       .cs_control = cs8405a_cs_control, /* Use external chip select */
+};
+
+static struct spi_board_info streetracer_spi_board_info[] __initdata = {
+       {
+               .modalias = "cs8415a", /* Name of spi_driver for this device */
+               .max_speed_hz = 3686400, /* Run SSP as fast as possible */
+               .bus_num = 2, /* Framework bus number */
+               .chip_select = 0, /* Framework chip select */
+               .platform_data = NULL, /* No spi_driver specific config */
+               .controller_data = &cs8415a_chip_info, /* Master chip config */
+               .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+       },
+       {
+               .modalias = "cs8405a", /* Name of spi_driver for this device */
+               .max_speed_hz = 3686400, /* Run SSP as fast as possible */
+               .bus_num = 2, /* Framework bus number */
+               .chip_select = 1, /* Framework chip select */
+               .controller_data = &cs8405a_chip_info, /* Master chip config */
+               .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+       },
+};
+
+static void __init streetracer_init(void)
+{
+       spi_register_board_info(streetracer_spi_board_info,
+                               ARRAY_SIZE(streetracer_spi_board_info));
+}
+
+
+DMA and PIO I/O Support
+-----------------------
+The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
+transfers.  The driver defaults to PIO mode; DMA transfers must be enabled by
+setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and by
+ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero.  The DMA
+mode supports both coherent and stream based DMA mappings.
+
+The following logic is used to determine the type of I/O to be used on
+a per "spi_transfer" basis:
+
+if !enable_dma or dma_burst_size == 0 then
+       always use PIO transfers
+
+if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
+       use coherent DMA mode
+
+if rx_buf and tx_buf are aligned on 8 byte boundary then
+       use streaming DMA mode
+
+otherwise
+       use PIO transfer
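+
+For illustration, the decision logic above might be written in C roughly as
+follows (a sketch using the "spi_message"/"spi_transfer" fields from
+"linux/spi/spi.h"; this is not the driver's actual code):
+
+enum io_mode { IO_PIO, IO_DMA_COHERENT, IO_DMA_STREAMING };
+
+static enum io_mode choose_io_mode(const struct spi_message *msg,
+                                  const struct spi_transfer *xfer,
+                                  int enable_dma, u8 dma_burst_size)
+{
+       if (!enable_dma || dma_burst_size == 0)
+               return IO_PIO;                  /* DMA disabled */
+       if (msg->is_dma_mapped && xfer->rx_dma && xfer->tx_dma)
+               return IO_DMA_COHERENT;         /* caller mapped the buffers */
+       if (!((unsigned long)xfer->rx_buf & 7) &&
+           !((unsigned long)xfer->tx_buf & 7))
+               return IO_DMA_STREAMING;        /* 8 byte aligned buffers */
+       return IO_PIO;                          /* otherwise fall back */
+}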
+
+THANKS TO
+---------
+
+David Brownell and others for mentoring the development of this driver.
+
index a5ffba33a35188a0d858871c06c090361de81b88..068732d32276b1f32bf285c10522b7e103e781e4 100644 (file)
@@ -414,7 +414,33 @@ to get the driver-private data allocated for that device.
 The driver will initialize the fields of that spi_master, including the
 bus number (maybe the same as the platform device ID) and three methods
 used to interact with the SPI core and SPI protocol drivers.  It will
-also initialize its own internal state.
+also initialize its own internal state.  (See below about bus numbering
+and those methods.)
+
+After you initialize the spi_master, use spi_register_master() to
+publish it to the rest of the system.  At that time, device nodes for
+the controller and any predeclared spi devices will be made available,
+and the driver model core will take care of binding them to drivers.
+
+If you need to remove your SPI controller driver, spi_unregister_master()
+will reverse the effect of spi_register_master().
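+
+A minimal probe() built on these calls might look like the following sketch
+(a hypothetical "foo" controller; struct foo_ctlr and the foo_setup,
+foo_transfer and foo_cleanup methods are illustrative names, and error
+handling is trimmed):
+
+static int foo_probe(struct platform_device *pdev)
+{
+       struct spi_master *master;
+
+       master = spi_alloc_master(&pdev->dev, sizeof(struct foo_ctlr));
+       if (!master)
+               return -ENOMEM;
+       master->bus_num = pdev->id;     /* see BUS NUMBERING below */
+       master->num_chipselect = 1;
+       master->setup = foo_setup;
+       master->transfer = foo_transfer;
+       master->cleanup = foo_cleanup;
+       return spi_register_master(master);
+}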
+
+
+BUS NUMBERING
+
+Bus numbering is important, since that's how Linux identifies a given
+SPI bus (shared SCK, MOSI, MISO).  Valid bus numbers start at zero.  On
+SOC systems, the bus numbers should match the numbers defined by the chip
+manufacturer.  For example, hardware controller SPI2 would be bus number 2,
+and spi_board_info for devices connected to it would use that number.
+
+If you don't have such hardware-assigned bus numbers, and for some reason
+you can't just assign them, then provide a negative bus number.  That will
+then be replaced by a dynamically assigned number.  You'd then need to treat
+this as a non-static configuration (see above).
+
+
+SPI MASTER METHODS
 
     master->setup(struct spi_device *spi)
        This sets up the device clock rate, SPI mode, and word sizes.
@@ -431,6 +457,9 @@ also initialize its own internal state.
        state it dynamically associates with that device.  If you do that,
        be sure to provide the cleanup() method to free that state.
 
+
+SPI MESSAGE QUEUE
+
 The bulk of the driver will be managing the I/O queue fed by transfer().
 
 That queue could be purely conceptual.  For example, a driver used only
@@ -440,6 +469,9 @@ But the queue will probably be very real, using message->queue, PIO,
 often DMA (especially if the root filesystem is in SPI flash), and
 execution contexts like IRQ handlers, tasklets, or workqueues (such
 as keventd).  Your driver can be as fancy, or as simple, as you need.
+Such a transfer() method would normally just add the message to a
+queue, and then start some asynchronous transfer engine (unless it's
+already running).
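+
+Such a transfer() method might be sketched as follows (illustration only;
+"struct driver_data" with its lock, queue, busy flag, workqueue and
+pump_messages work are hypothetical driver-private names):
+
+static int sketch_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+       struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+       unsigned long flags;
+
+       spin_lock_irqsave(&drv_data->lock, flags);
+       msg->status = -EINPROGRESS;
+       list_add_tail(&msg->queue, &drv_data->queue);
+       if (!drv_data->busy)
+               queue_work(drv_data->workqueue, &drv_data->pump_messages);
+       spin_unlock_irqrestore(&drv_data->lock, flags);
+       return 0;
+}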
 
 
 THANKS TO
index c5beb548cfc42b781fd825ff9345f411f087897d..21ed5117366240d2a33af5af7f5605733bd514c1 100644 (file)
@@ -36,6 +36,9 @@ timeout or margin.  The simplest way to ping the watchdog is to write
 some data to the device.  So a very simple watchdog daemon would look
 like this:
 
+#include <stdlib.h>
+#include <fcntl.h>
+
 int main(int argc, const char *argv[]) {
        int fd=open("/dev/watchdog",O_WRONLY);
        if (fd==-1) {
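 
 A complete version of this daemon might look like the following sketch (the
 error branch and ping loop are illustrative, with stdio.h and unistd.h added
 for perror(), write() and sleep()):
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <fcntl.h>
 
 int main(int argc, const char *argv[]) {
 	int fd = open("/dev/watchdog", O_WRONLY);
 	if (fd == -1) {
 		perror("watchdog");
 		exit(EXIT_FAILURE);
 	}
 	while (1) {
 		write(fd, "\0", 1);	/* any write pings the watchdog */
 		sleep(10);		/* must be less than the timeout */
 	}
 }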
index 5e33558714167ecafd03bbe7190fcc4a2dea269e..753584cf4e7e30a50224a8e908dc6fa695e2336f 100644 (file)
@@ -1603,6 +1603,11 @@ M:       James.Bottomley@HansenPartnership.com
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
 
+LED SUBSYSTEM
+P:     Richard Purdie
+M:     rpurdie@rpsys.net
+S:     Maintained
+
 LEGO USB Tower driver
 P:     Juergen Stuber
 M:     starblue@users.sourceforge.net
@@ -1662,7 +1667,7 @@ S:        Maintained
 
 LINUX FOR POWERPC EMBEDDED PPC8XX
 P:     Marcelo Tosatti
-M:     marcelo.tosatti@cyclades.com
+M:     marcelo@kvack.org
 W:     http://www.penguinppc.org/
 L:     linuxppc-embedded@ozlabs.org
 S:     Maintained
@@ -2513,6 +2518,12 @@ M:       perex@suse.cz
 L:     alsa-devel@alsa-project.org
 S:     Maintained
 
+SPI SUBSYSTEM
+P:     David Brownell
+M:     dbrownell@users.sourceforge.net
+L:     spi-devel-general@lists.sourceforge.net
+S:     Maintained
+
 TPM DEVICE DRIVER
 P:     Kylene Hall
 M:     kjhall@us.ibm.com
index 45fdf4a51a2ad0a7d1d127ff9dc75898a3aa6369..396efba9bacd2d6fb6351e18149576cc1d779665 100644 (file)
@@ -99,6 +99,8 @@ int main(void)
   DEFINE(MACHINFO_NAME,                offsetof(struct machine_desc, name));
   DEFINE(MACHINFO_PHYSIO,      offsetof(struct machine_desc, phys_io));
   DEFINE(MACHINFO_PGOFFIO,     offsetof(struct machine_desc, io_pg_offst));
+  BLANK();
+  DEFINE(PROC_INFO_SZ,         sizeof(struct proc_info_list));
   DEFINE(PROCINFO_INITFUNC,    offsetof(struct proc_info_list, __cpu_flush));
   DEFINE(PROCINFO_MMUFLAGS,    offsetof(struct proc_info_list, __cpu_mmu_flags));
   return 0; 
index 03532769a97f38840bea46f2969c914d857eb9d4..0a3e9ad297d8ba8be45967e811be5d226941eb90 100644 (file)
@@ -143,12 +143,23 @@ static struct dma_ops isa_dma_ops = {
        .residue        = isa_get_dma_residue,
 };
 
-static struct resource dma_resources[] = {
-       { "dma1",               0x0000, 0x000f },
-       { "dma low page",       0x0080, 0x008f },
-       { "dma2",               0x00c0, 0x00df },
-       { "dma high page",      0x0480, 0x048f }
-};
+static struct resource dma_resources[] = { {
+       .name   = "dma1",
+       .start  = 0x0000,
+       .end    = 0x000f
+}, {
+       .name   = "dma low page",
+       .start  = 0x0080,
+       .end    = 0x008f
+}, {
+       .name   = "dma2",
+       .start  = 0x00c0,
+       .end    = 0x00df
+}, {
+       .name   = "dma high page",
+       .start  = 0x0480,
+       .end    = 0x048f
+} };
 
 void __init isa_init_dma(dma_t *dma)
 {
index 1a1539e3a9462eef17f949445f116356bee810de..7df6e1aaa323b83ae0cedad37a943404d106868f 100644 (file)
@@ -311,7 +311,7 @@ void free_thread_info(struct thread_info *thread)
                struct thread_info_list *th = &get_cpu_var(thread_info_list);
                if (th->nr < EXTRA_TASK_STRUCT) {
                        unsigned long *p = (unsigned long *)thread;
-                       p[0] = th->head;
+                       p[0] = (unsigned long)th->head;
                        th->head = p;
                        th->nr += 1;
                        put_cpu_var(thread_info_list);
index 3bdc8c6949c59aabf7ef812c2b890307bb0ab43f..16153c86c3f871a69674f7c293784981431f6b56 100644 (file)
@@ -122,7 +122,7 @@ ENTRY(c_backtrace)
 #define reg   r5
 #define stack r6
 
-.Ldumpstm:     stmfd   sp!, {instr, reg, stack, r7, lr}
+.Ldumpstm:     stmfd   sp!, {instr, reg, stack, r7, r8, lr}
                mov     stack, r0
                mov     instr, r1
                mov     reg, #9
@@ -145,7 +145,7 @@ ENTRY(c_backtrace)
                adrne   r0, .Lcr
                blne    printk
                mov     r0, stack
-               LOADREGS(fd, sp!, {instr, reg, stack, r7, pc})
+               LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})
 
 .Lfp:          .asciz  " r%d = %08X%c"
 .Lcr:          .asciz  "\n"
index ec9a1cd6176fddb9c97fbb79992ac6699d6ad07f..58eef66076293a923d00a81e03370bcc1c734f83 100644 (file)
@@ -189,12 +189,12 @@ ENTRY(__do_div64)
        moveq   pc, lr
 
        @ Division by 0:
-       str     lr, [sp, #-4]!
+       str     lr, [sp, #-8]!
        bl      __div0
 
        @ as wrong as it could be...
        mov     yl, #0
        mov     yh, #0
        mov     xh, #0
-       ldr     pc, [sp], #4
+       ldr     pc, [sp], #8
 
index 98356f810007fad31d4fdc2cc39bea2e9b1b5c68..02e188d98e7d6d8565c4d06a2ddf9988776126e1 100644 (file)
@@ -95,7 +95,10 @@ static void __init mainstone_init_irq(void)
        for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
                set_irq_chip(irq, &mainstone_irq_chip);
                set_irq_handler(irq, do_level_IRQ);
-               set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+               if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
+                       set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
+               else
+                       set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        set_irq_flags(MAINSTONE_IRQ(8), 0);
        set_irq_flags(MAINSTONE_IRQ(12), 0);
index d4a586e38d5b2665c797b9558ff24abdc8937a54..693fb1e396e033e95ecd6afdce8552dec4ef4591 100644 (file)
@@ -137,8 +137,11 @@ static struct amba_device *amba_devs[] __initdata = {
 static void __init gic_init_irq(void)
 {
 #ifdef CONFIG_REALVIEW_MPCORE
+       unsigned int pldctrl;
        writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
-       writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
+       pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8);
+       pldctrl |= 0x00800000;  /* New irq mode */
+       writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8);
        writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
 #endif
        gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
index 832fb86a03b430343dd26e9e5be83168ab644c17..73de2eaca22a1c136db339177eb95d1acc4a7610 100644 (file)
@@ -59,8 +59,7 @@ ENTRY(s3c2410_cpu_suspend)
        mrc     p15, 0, r5, c13, c0, 0  @ PID
        mrc     p15, 0, r6, c3, c0, 0   @ Domain ID
        mrc     p15, 0, r7, c2, c0, 0   @ translation table base address
-       mrc     p15, 0, r8, c2, c0, 0   @ auxiliary control register
-       mrc     p15, 0, r9, c1, c0, 0   @ control register
+       mrc     p15, 0, r8, c1, c0, 0   @ control register
 
        stmia   r0, { r4 - r13 }
 
@@ -165,7 +164,6 @@ ENTRY(s3c2410_cpu_resume)
        mcr     p15, 0, r5, c13, c0, 0          @ PID
        mcr     p15, 0, r6, c3, c0, 0           @ Domain ID
        mcr     p15, 0, r7, c2, c0, 0           @ translation table base
-       mcr     p15, 0, r8, c1, c1, 0           @ auxilliary control
 
 #ifdef CONFIG_DEBUG_RESUME
        mov     r3, #'R'
@@ -173,7 +171,7 @@ ENTRY(s3c2410_cpu_resume)
 #endif
 
        ldr     r2, =resume_with_mmu
-       mcr     p15, 0, r9, c1, c0, 0           @ turn on MMU, etc
+       mcr     p15, 0, r8, c1, c0, 0           @ turn on MMU, etc
        nop                                     @ second-to-last before mmu
        mov     pc, r2                          @ go back to virtual address
 
index 25e0ca3e598cfc97ac0140aaa383effaf5e76b77..c1f7180c7beda07ff35d3211880902b8a83c2a2c 100644 (file)
@@ -141,7 +141,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                return NULL;
        addr = (unsigned long)area->addr;
        if (remap_area_pages(addr, pfn, size, flags)) {
-               vfree((void *)addr);
+               vunmap((void *)addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char *)addr);
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(__ioremap);
 
 void __iounmap(void __iomem *addr)
 {
-       vfree((void *) (PAGE_MASK & (unsigned long) addr));
+       vunmap((void *)(PAGE_MASK & (unsigned long)addr));
 }
 EXPORT_SYMBOL(__iounmap);
 
index c6fe99e57a0526ae2884eb49d500023ca3ef6ea0..8dfa3054f10fb0ce83576277f080caccf7644535 100644 (file)
@@ -758,10 +758,10 @@ config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
        depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
        ---help---
-         Say Y here to experiment with turning CPUs off and on.  CPUs
-         can be controlled through /sys/devices/system/cpu.
+         Say Y here to experiment with turning CPUs off and on, and to
+         enable suspend on SMP systems. CPUs can be controlled through
+         /sys/devices/system/cpu.
 
-         Say N.
 
 endmenu
 
index 40e5aba3ad3d425f1545ff7e199e1356a56e38d8..daee69579b1c6cd6dba672b79c48a2197453f56a 100644 (file)
@@ -1066,6 +1066,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
                     },
         },
+       {
+        .callback = disable_acpi_pci,
+        .ident = "HP xw9300",
+        .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
+                   },
+       },
        {}
 };
 
index 1a2076ce6f6a7107346a46ce29cfb8a956d2aed9..ec0fd3cfa774aa575fe6c3cf42da2af2c0e56f96 100644 (file)
@@ -332,10 +332,11 @@ static int __init ppro_init(char ** cpu_type)
 {
        __u8 cpu_model = boot_cpu_data.x86_model;
 
-       if (cpu_model > 0xd)
+       if (cpu_model == 14)
+               *cpu_type = "i386/core";
+       else if (cpu_model > 0xd)
                return 0;
-
-       if (cpu_model == 9) {
+       else if (cpu_model == 9) {
                *cpu_type = "i386/p6_mobile";
        } else if (cpu_model > 5) {
                *cpu_type = "i386/piii";
index f6a8853cd1b48fb8df9bbdab6beeca4ed886cc4a..9ea35398e10d0b9a28d43152d274a3b40dd05258 100644 (file)
@@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
 CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=8
+CONFIG_NODES_SHIFT=10
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
index 7956eb9058fcb873a65b5889d66f6eb6208e4c76..d58c1c5c903a9cfdae0981411ae53604fce020f1 100644 (file)
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
        ia64_vector vec = irq_to_vector(irq);
        struct iosapic_rte_info *rte;
 
-       move_irq(irq);
+       move_native_irq(irq);
        list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
                iosapic_eoi(rte->addr, vec);
 }
@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
        irq_desc_t *idesc = irq_descp(irq);
 
-       move_irq(irq);
+       move_native_irq(irq);
        /*
         * Once we have recorded IRQ_PENDING already, we can mask the
         * interrupt for real. This prevents IRQ storms from unhandled
index 5ce908ef9c9585f5f0350503fa39caf0e3e732bf..9c72ea3f6432d115ca32090d0c8bd9c8365d5c07 100644 (file)
@@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 
        if (irq < NR_IRQS) {
                irq_affinity[irq] = mask;
-               set_irq_info(irq, mask);
                irq_redir[irq] = (char) (redir & 0xff);
        }
 }
index 078fb5533541d001db70eb618fb59a791d0b35f6..2d80653aa2af54e866eee220d8193dde6d663c49 100644 (file)
@@ -1636,7 +1636,7 @@ static int __init prom_find_machine_type(void)
                           compat, sizeof(compat)-1);
        if (len <= 0)
                return PLATFORM_GENERIC;
-       if (strncmp(compat, RELOC("chrp"), 4))
+       if (strcmp(compat, RELOC("chrp")))
                return PLATFORM_GENERIC;
 
        /* Default to pSeries. We need to know if we are running LPAR */
index 5eb55ef1c91cd20a99ffe811596458c7d8acf326..5f79f01c44f2aff1d88c8edf2375d783195584d1 100644 (file)
@@ -255,7 +255,7 @@ static int __init pSeries_init_panel(void)
 {
        /* Manually leave the kernel version on the panel. */
        ppc_md.progress("Linux ppc64\n", 0);
-       ppc_md.progress(system_utsname.version, 0);
+       ppc_md.progress(system_utsname.release, 0);
 
        return 0;
 }
index ef5b9c44b86b50ac5c7f80fd3e0ddda66dcf3f1a..4d53b2739357d21aa707d838184dedf29486e220 100644 (file)
@@ -1650,3 +1650,11 @@ sys_tee_wrapper:
        llgfr   %r4,%r4                 # size_t
        llgfr   %r5,%r5                 # unsigned int
        jg      sys_tee
+
+       .globl compat_sys_vmsplice_wrapper
+compat_sys_vmsplice_wrapper:
+       lgfr    %r2,%r2                 # int
+       llgtr   %r3,%r3                 # compat_iovec *
+       llgfr   %r4,%r4                 # unsigned int
+       llgfr   %r5,%r5                 # unsigned int
+       jg      compat_sys_vmsplice
index fc2c0767202b22d6595f940dc7f278d36e704810..93be1d56c03662d46da46e1effc788a954dbc096 100644 (file)
@@ -317,3 +317,4 @@ SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapp
 SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
 SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
 SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
+SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
index fea043b69b913953c95d5c9ab97307d26e0083c5..029f09901b851fbd561e18d3be858e805df2bcb8 100644 (file)
@@ -249,18 +249,19 @@ static inline void stop_hz_timer(void)
        unsigned long flags;
        unsigned long seq, next;
        __u64 timer, todval;
+       int cpu = smp_processor_id();
 
        if (sysctl_hz_timer != 0)
                return;
 
-       cpu_set(smp_processor_id(), nohz_cpu_mask);
+       cpu_set(cpu, nohz_cpu_mask);
 
        /*
         * Leave the clock comparator set up for the next timer
         * tick if either rcu or a softirq is pending.
         */
-       if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
-               cpu_clear(smp_processor_id(), nohz_cpu_mask);
+       if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
+               cpu_clear(cpu, nohz_cpu_mask);
                return;
        }
 
index 44adcc2d5e5be43299d8f640161c6ec50658c285..1f6ecc62061d9121633e53edf672dec81b2da7c3 100644 (file)
@@ -12,9 +12,10 @@ static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
         if (hwdev && bus + size > *hwdev->dma_mask) {
-               printk(KERN_ERR
-                   "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
-              name, (long long)bus, size, (long long)*hwdev->dma_mask);
+               if (*hwdev->dma_mask >= 0xffffffffULL)
+                       printk(KERN_ERR
+                           "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+                               name, (long long)bus, size, (long long)*hwdev->dma_mask);
                return 0;
        }
        return 1;
index 6b87268c5c2e8711e16c3066e1661828ddb3f7a9..cea335e8746ca451b6b8571dfa052785e07a27aa 100644 (file)
@@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 {
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_disable();
+       /* Make sure to not schedule here because we could be running
+          on an exception stack. */
        preempt_enable_no_resched();
 }
 
@@ -483,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
 {
        struct task_struct *tsk = current;
 
-       conditional_sti(regs);
-
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;
 
@@ -521,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
+       conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
 }
 
@@ -535,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
+       conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
 }
 
@@ -548,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 DO_ERROR(18, SIGSEGV, "reserved", reserved)
-DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
+
+/* Runs on IST stack */
+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+       if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+                       12, SIGBUS) == NOTIFY_STOP)
+               return;
+       preempt_conditional_sti(regs);
+       do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+       preempt_conditional_cli(regs);
+}
 
 asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
 {
@@ -682,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                return;
        }
+       preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-       return;
+       preempt_conditional_cli(regs);
 }
 
 /* Help handler running on IST stack to switch back to user stack
index 15ae9fcd65a700c9f9060860f0fce56f7f6f3fcf..e1513532df293447a529cd7477b6bbca67363e18 100644 (file)
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0       /* Ignore all settings */
+#endif
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
 /* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
+       found_add_area = 0;
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                apicid_to_node[i] = NUMA_NO_NODE;
        for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
        int pxm, node;
        if (srat_disabled())
                return;
-       if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {                bad_srat();
+       if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+               bad_srat();
                return;
        }
        if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
        allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
        allowed = (allowed / 100) * hotadd_percent;
        if (allocated + mem > allowed) {
+               unsigned long range;
                /* Give them at least part of their hotadd memory up to hotadd_percent
                   It would be better to spread the limit out
                   over multiple hotplug areas, but that is too complicated
                   right now */
                if (allocated >= allowed)
                        return 0;
-               pages = (allowed - allocated + mem) / sizeof(struct page);
+               range = allowed - allocated;
+               pages = (range / PAGE_SIZE);
                mem = pages * sizeof(struct page);
-               nd->end = nd->start + pages*PAGE_SIZE;
+               nd->end = nd->start + range;
        }
        /* Not completely fool proof, but a good sanity check */
        addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
index 402296670d3a555acfecbe66ceff162edb887e99..78d928f9d9f1abaa3c3a0b46e7deb69cd9f65a16 100644 (file)
@@ -291,7 +291,7 @@ config SX
 
 config RIO
        tristate "Specialix RIO system support"
-       depends on SERIAL_NONSTANDARD && !64BIT
+       depends on SERIAL_NONSTANDARD
        help
          This is a driver for the Specialix RIO, a smart serial card which
          drives an outboard box that can support up to 128 ports.  Product
index 3ec73d1a279abf0b23e9676bb21ddaef54cd3cef..179cdbea712bc829467ae54b60a6e80fd3b2c4c3 100644 (file)
 #ifndef __rio_host_h__
 #define __rio_host_h__
 
-#ifdef SCCS_LABELS
-#ifndef lint
-static char *_host_h_sccs_ = "@(#)host.h       1.2";
-#endif
-#endif
-
 /*
 ** the host structure - one per host card in the system.
 */
@@ -77,9 +71,6 @@ struct Host {
 #define RC_STARTUP            1
 #define RC_RUNNING            2
 #define RC_STUFFED            3
-#define RC_SOMETHING          4
-#define RC_SOMETHING_NEW      5
-#define RC_SOMETHING_ELSE     6
 #define RC_READY              7
 #define RUN_STATE             7
 /*
index acda9326c2efa900a04636ccbd1c128a364e71bf..290143addd34cc8be99c1dbf5252746c48e3f5bc 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/termios.h>
 #include <linux/serial.h>
+#include <linux/vmalloc.h>
 #include <asm/semaphore.h>
 #include <linux/generic_serial.h>
 #include <linux/errno.h>
index d31aba62bb7f362d87d8e38961f0abc2ba197abc..75b2557c37ec77c83cabb51ce01e9afcc5bccb00 100644 (file)
@@ -1394,14 +1394,17 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
                return RIO_FAIL;
        }
 
-       if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) {
-               rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum);
+       if ((PortP->InUse == (typeof(PortP->InUse))-1) ||
+                       !(CmdBlkP = RIOGetCmdBlk())) {
+               rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block "
+                       "for command %d on port %d\n", Cmd, PortP->PortNum);
                return RIO_FAIL;
        }
 
-       rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse);
+       rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n",
+                       CmdBlkP, PortP->InUse);
 
-       PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0];
+       PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];
 
        CmdBlkP->Packet.src_unit = 0;
        if (PortP->SecondBlock)
@@ -1425,38 +1428,46 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
 
        switch (Cmd) {
        case MEMDUMP:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
+                               "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
                PktCmdP->SubCommand = MEMDUMP;
                PktCmdP->SubAddr = SubCmd.Addr;
                break;
        case FCLOSE:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
+                               CmdBlkP);
                break;
        case READ_REGISTER:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
+                               "command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
                PktCmdP->SubCommand = READ_REGISTER;
                PktCmdP->SubAddr = SubCmd.Addr;
                break;
        case RESUME:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
+                               CmdBlkP);
                break;
        case RFLUSH:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
+                               CmdBlkP);
                CmdBlkP->PostFuncP = RIORFlushEnable;
                break;
        case SUSPEND:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
+                               CmdBlkP);
                break;
 
        case MGET:
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
+                               CmdBlkP);
                break;
 
        case MSET:
        case MBIC:
        case MBIS:
                CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
-               rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP);
+               rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
+                               "blk %p\n", CmdBlkP);
                break;
 
        case WFLUSH:
@@ -1465,12 +1476,14 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
                 ** allowed then we should not bother sending any more to the
                 ** RTA.
                 */
-               if ((int) ((char) PortP->WflushFlag) == (int) -1) {
-                       rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!");
+               if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) {
+                       rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, "
+                                       "WflushFlag about to wrap!");
                        RIOFreeCmdBlk(CmdBlkP);
                        return (RIO_FAIL);
                } else {
-                       rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP);
+                       rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command "
+                                       "blk %p\n", CmdBlkP);
                        CmdBlkP->PostFuncP = RIOWFlushMark;
                }
                break;
index 14b83fae75c8ed1abc890615337e546decfa7850..e8af5b30519e39e64381c58e1a0e709a67340cbe 100644 (file)
 #ifndef        __rioioctl_h__
 #define        __rioioctl_h__
 
-#ifdef SCCS_LABELS
-static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h       1.2";
-#endif
-
 /*
 ** RIO device driver - user ioctls and associated structures.
 */
@@ -44,55 +40,13 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h    1.2";
 struct portStats {
        int port;
        int gather;
-       ulong txchars;
-       ulong rxchars;
-       ulong opens;
-       ulong closes;
-       ulong ioctls;
+       unsigned long txchars;
+       unsigned long rxchars;
+       unsigned long opens;
+       unsigned long closes;
+       unsigned long ioctls;
 };
 
-
-#define rIOC   ('r'<<8)
-#define        TCRIOSTATE      (rIOC | 1)
-#define        TCRIOXPON       (rIOC | 2)
-#define        TCRIOXPOFF      (rIOC | 3)
-#define        TCRIOXPCPS      (rIOC | 4)
-#define        TCRIOXPRINT     (rIOC | 5)
-#define TCRIOIXANYON   (rIOC | 6)
-#define        TCRIOIXANYOFF   (rIOC | 7)
-#define TCRIOIXONON    (rIOC | 8)
-#define        TCRIOIXONOFF    (rIOC | 9)
-#define        TCRIOMBIS       (rIOC | 10)
-#define        TCRIOMBIC       (rIOC | 11)
-#define        TCRIOTRIAD      (rIOC | 12)
-#define TCRIOTSTATE    (rIOC | 13)
-
-/*
-** 15.10.1998 ARG - ESIL 0761 part fix
-** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS
-** appears to not support hardware flow control).
-*/
-#define TCRIOCTSFLOWEN (rIOC | 14)     /* enable CTS flow control */
-#define TCRIOCTSFLOWDIS        (rIOC | 15)     /* disable CTS flow control */
-#define TCRIORTSFLOWEN (rIOC | 16)     /* enable RTS flow control */
-#define TCRIORTSFLOWDIS        (rIOC | 17)     /* disable RTS flow control */
-
-/*
-** 09.12.1998 ARG - ESIL 0776 part fix
-** Definition for 'RIOC' also appears in daemon.h, so we'd better do a
-** #ifndef here first.
-** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now
-** allowed to be used by customers.
-**
-** 05.02.1999 ARG -
-** This is what I've decied to do with ioctls etc., which are intended to be
-** invoked from users applications :
-** Anything that needs to be defined here will be removed from daemon.h, that
-** way it won't end up having to be defined/maintained in two places. The only
-** consequence of this is that this file should now be #include'd by daemon.h
-**
-** 'stats' ioctls now #define'd here as they are to be used by customers.
-*/
 #define        RIOC    ('R'<<8)|('i'<<16)|('o'<<24)
 
 #define        RIO_QUICK_CHECK         (RIOC | 105)
index 1efde3b27619b4113ca95ee1ca3116f6409b0ca2..fe00c7dfb649bf054376aa30522cb6c1ed2098e7 100644 (file)
@@ -22,7 +22,7 @@ config TCG_TPM
 
 config TCG_TIS
        tristate "TPM Interface Specification 1.2 Interface"
-       depends on TCG_TPM
+       depends on TCG_TPM && PNPACPI
        ---help---
          If you have a TPM security chip that is compliant with the
          TCG TIS 1.2 TPM specification say Yes and it will be accessible
index 54a4c804e25f30fc38eed78e5a4b32d2e07f1bc9..050ced247f68018224addcc572e7faa7f6f47fec 100644 (file)
@@ -140,7 +140,7 @@ extern int tpm_pm_resume(struct device *);
 extern struct dentry ** tpm_bios_log_setup(char *);
 extern void tpm_bios_log_teardown(struct dentry **);
 #else
-static inline struct dentry* tpm_bios_log_setup(char *name)
+static inline struct dentry ** tpm_bios_log_setup(char *name)
 {
        return NULL;
 }
index b9cae9a238bb651d7814740b013ae39899d9f727..f621168f38aec754a418cf4390d9460506a08dec 100644 (file)
@@ -55,7 +55,7 @@ enum tis_int_flags {
 };
 
 enum tis_defaults {
-       TIS_MEM_BASE = 0xFED4000,
+       TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
index a13395e2c372a9b27355040c9c94eae21040992b..fa2ba9ebe42aacf3478212143547c460d877402e 100644 (file)
  *     82801E   (C-ICH)  : document number 273599-001, 273645-002,
  *     82801EB  (ICH5)   : document number 252516-001, 252517-003,
  *     82801ER  (ICH5R)  : document number 252516-001, 252517-003,
- *     82801FB  (ICH6)   : document number 301473-002, 301474-007,
- *     82801FR  (ICH6R)  : document number 301473-002, 301474-007,
- *     82801FBM (ICH6-M) : document number 301473-002, 301474-007,
- *     82801FW  (ICH6W)  : document number 301473-001, 301474-007,
- *     82801FRW (ICH6RW) : document number 301473-001, 301474-007
  *
  *  20000710 Nils Faerber
  *     Initial Version 0.01
  *  20050807 Wim Van Sebroeck <wim@iguana.be>
  *     0.08 Make sure that the watchdog is only "armed" when started.
  *          (Kernel Bug 4251)
+ *  20060416 Wim Van Sebroeck <wim@iguana.be>
+ *     0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
+ *          ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
+ *          chipsets)
  */
 
 /*
@@ -90,7 +89,7 @@
 #include "i8xx_tco.h"
 
 /* Module and version information */
-#define TCO_VERSION "0.08"
+#define TCO_VERSION "0.09"
 #define TCO_MODULE_NAME "i8xx TCO timer"
 #define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
 #define PFX TCO_MODULE_NAME ": "
@@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = {
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,  PCI_ANY_ID, PCI_ANY_ID, },
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0,    PCI_ANY_ID, PCI_ANY_ID, },
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,   PCI_ANY_ID, PCI_ANY_ID, },
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,      PCI_ANY_ID, PCI_ANY_ID, },
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,      PCI_ANY_ID, PCI_ANY_ID, },
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2,      PCI_ANY_ID, PCI_ANY_ID, },
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,      PCI_ANY_ID, PCI_ANY_ID, },
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,      PCI_ANY_ID, PCI_ANY_ID, },
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,       PCI_ANY_ID, PCI_ANY_ID, },
        { 0, },                 /* End of list */
 };
index 9dc54736e4eb627d5e843bda4e2bc2a243aa0840..1ea04e9b2b0b0938cfc2cbfa04b2eed12b4da01b 100644 (file)
@@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
        if (tmr_atboot && started == 0) {
                printk(KERN_INFO PFX "Starting Watchdog Timer\n");
                s3c2410wdt_start();
+       } else if (!tmr_atboot) {
+               /* if we're not enabling the watchdog, then ensure it is
+                * disabled if it has been left running from the bootloader
+                * or other source */
+
+               s3c2410wdt_stop();
        }
 
        return 0;
index 515ce757204996c188589e74ce29341f8b41ab76..20b88f9b7be20d9081c58a72e217a89a394661f2 100644 (file)
@@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void)
 {
        int ret;
 
-       printk(banner);
+       printk("%s\n", banner);
 
        spin_lock_init(&sc1200wdt_lock);
        sema_init(&open_sem, 1);
index 4961f1e764a755bf4729951b7447620dea02d04f..602797a4420877f6fc74e659a05b4c50be8d54d6 100644 (file)
@@ -392,6 +392,7 @@ static struct pcmcia_device_id ide_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
        PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
        PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
+       PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
        PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
index 19222878aae92943f5528d09e67582a517048024..11f13778f13915e35bea6075dae7676e717952f9 100644 (file)
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
         * register content.
         * To actually enable physical responses is the job of our interrupt
         * handler which programs the physical request filter. */
-       reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
+       reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
 
        DBGMSG("physUpperBoundOffset=%08x",
               reg_read(ohci, OHCI1394_PhyUpperBound));
index f4206604db0378fb33fda046cb403f4a346cdb45..8a23fb54c6939bdbdac285570a45974e74740b48 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/string.h>
+#include <linux/stringify.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/fs.h>
@@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
  */
 static int max_sectors = SBP2_MAX_SECTORS;
 module_param(max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
+                __stringify(SBP2_MAX_SECTORS) ")");
 
 /*
  * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644);
 MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
 
 /*
- * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
- * if your sbp2 device is not properly handling the SCSI inquiry command.
- * This hack makes the inquiry look more like a typical MS Windows inquiry
- * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
+ * If any of the following workarounds is required for your device to work,
+ * please submit the kernel messages logged by sbp2 to the linux1394-devel
+ * mailing list.
  *
- * If force_inquiry_hack=1 is required for your device to work,
- * please submit the logged sbp2_firmware_revision value of this device to
- * the linux1394-devel mailing list.
+ * - 128kB max transfer
+ *   Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ *   When scsi_mod probes the device, let the inquiry command look like that
+ *   from MS Windows.
+ *
+ * - skip mode page 8
+ *   Suppress sending of mode_sense for mode page 8 if the device pretends to
+ *   support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ *   Tell sd_mod to correct the last sector number reported by read_capacity.
+ *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ *   Don't use this with devices which don't have this bug.
+ *
+ * - override internal blacklist
+ *   Instead of adding to the built-in blacklist, use only the workarounds
+ *   specified in the module load parameter.
+ *   Useful if a blacklist entry interfered with a non-broken device.
  */
+static int sbp2_default_workarounds;
+module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+       ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+       ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
+       ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+       ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+       ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+       ", or a combination)");
+
+/* legacy parameter */
 static int force_inquiry_hack;
 module_param(force_inquiry_hack, int, 0644);
-MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
+MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
 
 /*
  * Export information about protocols/devices supported by this driver.
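
The workaround flags (defined in sbp2.h below as 0x1, 0x2, 0x4, 0x8 and
0x100) form a bitmask, so values simply OR together. Illustrative usage,
not taken from the patch itself:

    # enable the 36 byte inquiry (0x2) plus skip mode page 8 (0x4):
    modprobe sbp2 workarounds=0x6

    # trust only the user-supplied flags, ignoring the built-in table:
    modprobe sbp2 workarounds=0x100
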
@@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = {
 };
 
 /*
- * List of device firmwares that require the inquiry hack.
- * Yields a few false positives but did not break other devices so far.
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best indicator
+ * for the type of bridge chip of a device.  It yields a few false positives,
+ * but so far this has not broken correctly behaving devices.
  */
-static u32 sbp2_broken_inquiry_list[] = {
-       0x00002800,     /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
-                       /* DViCO Momobay CX-1 */
-       0x00000200      /* Andreas Plesch <plesch@fas.harvard.edu> */
-                       /* QPS Fire DVDBurner */
+static const struct {
+       u32 firmware_revision;
+       u32 model_id;
+       unsigned workarounds;
+} sbp2_workarounds_table[] = {
+       /* TSB42AA9 */ {
+               .firmware_revision      = 0x002800,
+               .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
+                                         SBP2_WORKAROUND_MODE_SENSE_8,
+       },
+       /* Initio bridges, actually only needed for some older ones */ {
+               .firmware_revision      = 0x000200,
+               .workarounds            = SBP2_WORKAROUND_INQUIRY_36,
+       },
+       /* Symbios bridge */ {
+               .firmware_revision      = 0xa0b800,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
+       /*
+        * Note about the following Apple iPod blacklist entries:
+        *
+        * There are iPods (2nd gen, 3rd gen) with model_id==0.  Since our
+        * matching logic treats 0 as a wildcard, we cannot match this ID
+        * without rewriting the matching routine.  Fortunately these iPods
+        * do not feature the read_capacity bug according to one report.
+        * Read_capacity behaviour as well as model_id could change due to
+        * Apple-supplied firmware updates though.
+        */
+       /* iPod 4th generation */ {
+               .firmware_revision      = 0x0a2700,
+               .model_id               = 0x000021,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod mini */ {
+               .firmware_revision      = 0x0a2700,
+               .model_id               = 0x000023,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod Photo */ {
+               .firmware_revision      = 0x0a2700,
+               .model_id               = 0x00007e,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       }
 };
 
 /**************************************
@@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
 
        /* Register the status FIFO address range. We could use the same FIFO
         * for targets at different nodes. However we need different FIFOs per
-        * target in order to support multi-unit devices. */
+        * target in order to support multi-unit devices.
+        * The FIFO is located outside the local host controller's physical range
+        * but, if possible, within the posted write area. Status writes will
+        * then be performed as unified transactions. This slightly reduces
+        * bandwidth usage, and some Prolific-based devices seem to require it.
+        */
        scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
                        &sbp2_highlevel, ud->ne->host, &sbp2_ops,
                        sizeof(struct sbp2_status_block), sizeof(quadlet_t),
-                       ~0ULL, ~0ULL);
+                       0x010000000000ULL, CSR1212_ALL_SPACE_END);
        if (!scsi_id->status_fifo_addr) {
                SBP2_ERR("failed to allocate status FIFO address range");
                goto failed_alloc;
@@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
        struct csr1212_dentry *dentry;
        u64 management_agent_addr;
        u32 command_set_spec_id, command_set, unit_characteristics,
-           firmware_revision, workarounds;
+           firmware_revision;
+       unsigned workarounds;
        int i;
 
        SBP2_DEBUG_ENTER();
@@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
                case SBP2_FIRMWARE_REVISION_KEY:
                        /* Firmware revision */
                        firmware_revision = kv->value.immediate;
-                       if (force_inquiry_hack)
-                               SBP2_INFO("sbp2_firmware_revision = %x",
-                                         (unsigned int)firmware_revision);
-                       else
-                               SBP2_DEBUG("sbp2_firmware_revision = %x",
-                                          (unsigned int)firmware_revision);
+                       SBP2_DEBUG("sbp2_firmware_revision = %x",
+                                  (unsigned int)firmware_revision);
                        break;
 
                default:
@@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
                }
        }
 
-       /* This is the start of our broken device checking. We try to hack
-        * around oddities and known defects.  */
-       workarounds = 0x0;
+       workarounds = sbp2_default_workarounds;
+       if (force_inquiry_hack) {
+               SBP2_WARN("force_inquiry_hack is deprecated. "
+                         "Use parameter 'workarounds' instead.");
+               workarounds |= SBP2_WORKAROUND_INQUIRY_36;
+       }
 
-       /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
-        * bridge with 128KB max transfer size limitation. For sanity, we
-        * only voice this when the current max_sectors setting
-        * exceeds the 128k limit. By default, that is not the case.
-        *
-        * It would be really nice if we could detect this before the scsi
-        * host gets initialized. That way we can down-force the
-        * max_sectors to account for it. That is not currently
-        * possible.  */
-       if ((firmware_revision & 0xffff00) ==
-                       SBP2_128KB_BROKEN_FIRMWARE &&
-                       (max_sectors * 512) > (128*1024)) {
-               SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
-                               NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
-               SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
-                               max_sectors);
-               workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
-       }
-
-       /* Check for a blacklisted set of devices that require us to force
-        * a 36 byte host inquiry. This can be overriden as a module param
-        * (to force all hosts).  */
-       for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
-               if ((firmware_revision & 0xffff00) ==
-                               sbp2_broken_inquiry_list[i]) {
-                       SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
-                                       NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
-                       workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
-                       break; /* No need to continue. */
+       if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
+               for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+                       if (sbp2_workarounds_table[i].firmware_revision &&
+                           sbp2_workarounds_table[i].firmware_revision !=
+                           (firmware_revision & 0xffff00))
+                               continue;
+                       if (sbp2_workarounds_table[i].model_id &&
+                           sbp2_workarounds_table[i].model_id != ud->model_id)
+                               continue;
+                       workarounds |= sbp2_workarounds_table[i].workarounds;
+                       break;
                }
-       }
+
+       if (workarounds)
+               SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
+                         "(firmware_revision 0x%06x, vendor_id 0x%06x,"
+                         " model_id 0x%06x)",
+                         NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+                         workarounds, firmware_revision,
+                         ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+                         ud->model_id);
+
+       /* We would need one SCSI host template for each target to adjust
+        * max_sectors on the fly, therefore warn only. */
+       if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+           (max_sectors * 512) > (128 * 1024))
+               SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
+                         "max transfer size. WARNING: Current max_sectors "
+                         "setting is larger than 128KB (%d sectors)",
+                         NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+                         max_sectors);
 
        /* If this is a logical unit directory entry, process the parent
         * to get the values. */
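
A worked example of the matching rules above, with made-up device values:

    /* device reports firmware_revision 0x0a2709, model_id 0x000023 */
    (0x0a2709 & 0xffff00) == 0x0a2700  /* iPod mini entry matches   */
    0x000023 == 0x000023               /* model_id matches too      */
    /* -> workarounds |= SBP2_WORKAROUND_FIX_CAPACITY               */

A zero firmware_revision or model_id in a table entry is skipped by the
checks above, i.e. it acts as a wildcard -- which is why the 2nd/3rd
generation iPods with model_id == 0 cannot be matched on model_id, as the
table comment explains.
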
@@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
 
        scsi_id->sdev = sdev;
 
-       if (force_inquiry_hack ||
-           scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
+       if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
                sdev->inquiry_len = 36;
-               sdev->skip_ms_page_8 = 1;
-       }
        return 0;
 }
 
 static int sbp2scsi_slave_configure(struct scsi_device *sdev)
 {
+       struct scsi_id_instance_data *scsi_id =
+               (struct scsi_id_instance_data *)sdev->host->hostdata[0];
+
        blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
        sdev->use_10_for_rw = 1;
        sdev->use_10_for_ms = 1;
+
+       if (sdev->type == TYPE_DISK &&
+           scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+               sdev->skip_ms_page_8 = 1;
+       if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+               sdev->fix_capacity = 1;
        return 0;
 }
 
@@ -2603,7 +2684,9 @@ static int sbp2_module_init(void)
                scsi_driver_template.cmd_per_lun = 1;
        }
 
-       /* Set max sectors (module load option). Default is 255 sectors. */
+       if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+           (max_sectors * 512) > (128 * 1024))
+               max_sectors = 128 * 1024 / 512;
        scsi_driver_template.max_sectors = max_sectors;
 
        /* Register our high level driver with 1394 stack */
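
The clamp just above is simple arithmetic: with 512-byte sectors,

    max_sectors = 128 * 1024 / 512;    /* = 256 sectors = 128 kB */

so a user-raised max_sectors is pulled back to 256 when the global 128 kB
workaround is requested at load time. The per-device path earlier can only
warn, since the SCSI host template is shared and already registered.
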
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a9ea3a46d9f3a13328f1f3a3f1b3412149..f4ccc9d0fba4106d6a8da4e6f886b45f3172993f 100644
@@ -226,11 +226,6 @@ struct sbp2_status_block {
 #define SBP2_UNIT_SPEC_ID_ENTRY                                        0x0000609e
 #define SBP2_SW_VERSION_ENTRY                                  0x00010483
 
-/*
- * Other misc defines
- */
-#define SBP2_128KB_BROKEN_FIRMWARE                             0xa0b800
-
 /*
  * SCSI specific stuff
  */
@@ -239,6 +234,13 @@ struct sbp2_status_block {
 #define SBP2_MAX_SECTORS               255     /* Max sectors supported */
 #define SBP2_MAX_CMDS                  8       /* This should be safe */
 
+/* Flags for detected oddities and brokenness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36     0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8   0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY   0x8
+#define SBP2_WORKAROUND_OVERRIDE       0x100
+
 /* This is the two dma types we use for cmd_dma below */
 enum cmd_dma_types {
        CMD_DMA_NONE,
@@ -268,10 +270,6 @@ struct sbp2_command_info {
 
 };
 
-/* A list of flags for detected oddities and brokeness. */
-#define SBP2_BREAKAGE_128K_MAX_TRANSFER                0x1
-#define SBP2_BREAKAGE_INQUIRY_HACK             0x2
-
 struct sbp2scsi_host_info;
 
 /*
@@ -345,7 +343,7 @@ struct scsi_id_instance_data {
        struct Scsi_Host *scsi_host;
 
        /* Device specific workarounds/brokeness */
-       u32 workarounds;
+       unsigned workarounds;
 };
 
 /* Sbp2 host data structure (one per IEEE1394 host) */
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c315668cebbc7e34be30704b0c4851bcf66..efe147dbeb42137e695298e19c525e1acf027a07 100644
@@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
         */
 
        work = kmalloc(sizeof *work, GFP_KERNEL);
-       if (!work)
+       if (!work) {
+               mmput(mm);
                return;
+       }
 
        INIT_WORK(&work->work, ib_umem_account, work);
        work->mm   = mm;
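
The added mmput() plugs a reference leak: this function takes a reference
on the mm earlier (via get_task_mm(), going by the usual pattern in this
code), and the early return on kmalloc() failure previously skipped the
matching release. The shape of the fix, with names assumed from context:

    struct mm_struct *mm = get_task_mm(current);   /* +1 reference */
    if (!mm)
            return;
    ...
    work = kmalloc(sizeof *work, GFP_KERNEL);
    if (!work) {
            mmput(mm);     /* every exit path must drop the reference */
            return;
    }
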
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5dfa481bdb91efaf5611bba73f83de3a1b1..798e13e14faf826695d8b64a0493a5580e24bd73 100644
@@ -182,7 +182,7 @@ struct mthca_cmd_context {
        u8                status;
 };
 
-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
 module_param(fw_cmd_doorbell, int, 0644);
 MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
                 "(and supported by FW)");
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 19765f6f8d5868eca6845d2319819bccdf181e64..07c13be07a4a5da858a975fe6d11059b0da3a35b 100644
@@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        ind = qp->rq.next_ind;
 
-       for (nreq = 0; wr; ++nreq, wr = wr->next) {
-               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-                       nreq = 0;
-
-                       doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
-                       doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
-                       wmb();
-
-                       mthca_write64(doorbell,
-                                     dev->kar + MTHCA_RECEIVE_DOORBELL,
-                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
-                       qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-                       size0 = 0;
-               }
-
+       for (nreq = 0; wr; wr = wr->next) {
                if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
                        mthca_err(dev, "RQ %06x full (%u head, %u tail,"
                                        " %d max, %d nreq)\n", qp->qpn,
@@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                ++ind;
                if (unlikely(ind >= qp->rq.max))
                        ind -= qp->rq.max;
+
+               ++nreq;
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+                       doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+                       wmb();
+
+                       mthca_write64(doorbell,
+                                     dev->kar + MTHCA_RECEIVE_DOORBELL,
+                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+                       qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+                       size0 = 0;
+               }
        }
 
 out:
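
The two hunks above move the periodic doorbell from the top of the posting
loop to the bottom, after the WQE has been written and nreq incremented, so
the doorbell only ever covers descriptors that actually exist. Condensed
shape of the loop after the change:

    for (nreq = 0; wr; wr = wr->next) {
            /* ... build one receive WQE at index ind ... */
            ++nreq;
            if (nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) {
                    nreq = 0;
                    /* ring the doorbell for the batch just built */
            }
    }
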
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c32ce4348e1b817fd59c4c9425a07ca853881a49..9cbdffa08dc2bca6da821638efc0078b24792374 100644
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
        /* XXX should send SRP_I_LOGOUT request */
 
        init_completion(&target->done);
-       ib_send_cm_dreq(target->cm_id, NULL, 0);
+       if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+               printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+               return;
+       }
        wait_for_completion(&target->done);
 }
 
@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr)
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_DEAD) {
                spin_unlock_irq(target->scsi_host->host_lock);
-               scsi_host_put(target->scsi_host);
                return;
        }
        target->state = SRP_TARGET_REMOVED;
@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr)
        ib_destroy_cm_id(target->cm_id);
        srp_free_target_ib(target);
        scsi_host_put(target->scsi_host);
-       /* And another put to really free the target port... */
-       scsi_host_put(target->scsi_host);
 }
 
 static int srp_connect_target(struct srp_target_port *target)
@@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
        list_for_each_entry_safe(req, tmp, &target->req_queue, list)
                if (req->scmnd->device == scmnd->device) {
                        req->scmnd->result = DID_RESET << 16;
-                       scmnd->scsi_done(scmnd);
+                       req->scmnd->scsi_done(req->scmnd);
                        srp_remove_req(target, req);
                }
 
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 9b493f0becc4d370541742a7fec7f4ef6aed43df..173c899a1fb40cb9eca8ff5b02844a95519b2527 100644
@@ -1499,7 +1499,6 @@ static int __init capi_init(void)
                printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
                return major_ret;
        }
-       capi_major = major_ret;
        capi_class = class_create(THIS_MODULE, "capi");
        if (IS_ERR(capi_class)) {
                unregister_chrdev(capi_major, "capi20");
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index bfb73fd5077e85aa148a48dbbf5165f5185c5c19..d86ab68114b0c7848126661c2009816658093c9d 100644
@@ -710,8 +710,8 @@ static int gigaset_probe(struct usb_interface *interface,
        retval = -ENODEV; //FIXME
 
        /* See if the device offered us matches what we can accept */
-       if ((le16_to_cpu(udev->descriptor.idVendor  != USB_M105_VENDOR_ID)) ||
-           (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID)))
+       if ((le16_to_cpu(udev->descriptor.idVendor)  != USB_M105_VENDOR_ID) ||
+           (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
                return -ENODEV;
 
        /* this starts to become ascii art... */
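
The gigaset fix above is a one-parenthesis bug: the closing parenthesis sat
after the comparison, so le16_to_cpu() byte-swapped the boolean result
(0 or 1) instead of the descriptor field, and the ID check never worked as
intended. Distilled:

    /* buggy: compares first, then byte-swaps the 0/1 result */
    if (le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID))

    /* fixed: byte-swap the field, then compare */
    if (le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID)
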
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 3f5b647945426d5db0c1ffa7793b6fa8dc37e482..626506234b76ca62b29b34708e820f122470e2db 100644
@@ -4,8 +4,11 @@ menu "LED devices"
 config NEW_LEDS
        bool "LED Support"
        help
-         Say Y to enable Linux LED support.  This is not related to standard
-         keyboard LEDs which are controlled via the input system.
+         Say Y to enable Linux LED support.  This allows control of supported
+         LEDs from both userspace and optionally, by kernel events (triggers).
+
+         This is not related to standard keyboard LEDs which are controlled
+         via the input system.
 
 config LEDS_CLASS
        tristate "LED Class Support"
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index b0b5d05fadd62a106c5e49f027608ad55fb093a4..c75d0ef1609cb900d0cc014620bccbe8fbff2e7c 100644
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/timer.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
 #include <linux/leds.h>
 #include "leds.h"
 
@@ -43,9 +44,13 @@ static ssize_t led_brightness_store(struct class_device *dev,
        ssize_t ret = -EINVAL;
        char *after;
        unsigned long state = simple_strtoul(buf, &after, 10);
+       size_t count = after - buf;
 
-       if (after - buf > 0) {
-               ret = after - buf;
+       if (*after && isspace(*after))
+               count++;
+
+       if (count == size) {
+               ret = count;
                led_set_brightness(led_cdev, state);
        }
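
The rewritten store method accepts exactly one number, optionally followed
by trailing whitespace; that matters because `echo 255 > brightness` writes
"255\n". A worked trace (buffer contents illustrative):

    /* buf = "255\n", size = 4                                      */
    state = simple_strtoul(buf, &after, 10);  /* after -> "\n"      */
    count = after - buf;                      /* 3                  */
    /* *after is whitespace -> count++ -> count == size -> accept   */
    /* buf = "255x": 'x' is not whitespace, 3 != 4 -> -EINVAL       */
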
 
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index f484b5d6dbf86494ad9eb0f3c07cd276743a30b9..fbf141ef46ec3488779a54c616cdff600f38890d 100644
@@ -20,6 +20,7 @@
 #include <linux/device.h>
 #include <linux/sysdev.h>
 #include <linux/timer.h>
+#include <linux/ctype.h>
 #include <linux/leds.h>
 #include "leds.h"
 
@@ -69,11 +70,15 @@ static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
        int ret = -EINVAL;
        char *after;
        unsigned long state = simple_strtoul(buf, &after, 10);
+       size_t count = after - buf;
 
-       if (after - buf > 0) {
+       if (*after && isspace(*after))
+               count++;
+
+       if (count == size) {
                timer_data->delay_on = state;
                mod_timer(&timer_data->timer, jiffies + 1);
-               ret = after - buf;
+               ret = count;
        }
 
        return ret;
@@ -97,11 +102,15 @@ static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
        int ret = -EINVAL;
        char *after;
        unsigned long state = simple_strtoul(buf, &after, 10);
+       size_t count = after - buf;
+
+       if (*after && isspace(*after))
+               count++;
 
-       if (after - buf > 0) {
+       if (count == size) {
                timer_data->delay_off = state;
                mod_timer(&timer_data->timer, jiffies + 1);
-               ret = after - buf;
+               ret = count;
        }
 
        return ret;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 914d62b2406411ca76f707c4ccf8d42a58d4f583..5dc4bee7abebfb9c8685e61306ae54bdff754060 100644
@@ -310,7 +310,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
                }
                else
                        data->bytes_xfered =
-                               (data->blocks * (1 << data->blksz_bits)) -
+                               (data->blocks * data->blksz) -
                                host->pio.len;
        }
 
@@ -575,7 +575,7 @@ static int
 au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
 {
 
-       int datalen = data->blocks * (1 << data->blksz_bits);
+       int datalen = data->blocks * data->blksz;
 
        if (dma != 0)
                host->flags |= HOST_F_DMA;
@@ -596,7 +596,7 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
        if (host->dma.len == 0)
                return MMC_ERR_TIMEOUT;
 
-       au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host));
+       au_writel(data->blksz - 1, HOST_BLKSIZE(host));
 
        if (host->flags & HOST_F_DMA) {
                int i;
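
This and the following MMC hunks replace uses of the log2-encoded
blksz_bits with the new byte-sized blksz field; the core mmc.c and
mmc_block.c hunks below set both fields during the transition. The
relationship, as a sketch:

    data.blksz_bits = 9;                     /* log2 of block size */
    data.blksz      = 1 << data.blksz_bits;  /* 512 bytes          */
    bytes_xfered    = data.blocks * data.blksz;
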
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 79358e223f5788fdd8dac33c24ce11170ec2ca5c..a4eb1d0e7a7169e381847c978c71b7bcb4dd72f1 100644
@@ -218,8 +218,10 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host,
        if(!loops)
                return 0;
 
-       dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
-               loops, where, *pstat, stat_mask);
+       /* The busy-wait is expected here for clocks < 8 MHz due to SDHC hardware flaws */
+       if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
+               dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
+                       loops, where, *pstat, stat_mask);
        return loops;
 }
 
@@ -333,6 +335,9 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd,
        WARN_ON(host->cmd != NULL);
        host->cmd = cmd;
 
+       /* Ensure that the clock is stopped, else command programming and start fail */
+       imxmci_stop_clock(host);
+
        if (cmd->flags & MMC_RSP_BUSY)
                cmdat |= CMD_DAT_CONT_BUSY;
 
@@ -553,7 +558,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
        int trans_done = 0;
        unsigned int stat = *pstat;
 
-       if(host->actual_bus_width == MMC_BUS_WIDTH_4)
+       if(host->actual_bus_width != MMC_BUS_WIDTH_4)
                burst_len = 16;
        else
                burst_len = 64;
@@ -591,8 +596,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
                        stat = MMC_STATUS;
 
                        /* Flush extra bytes from FIFO */
-                       while(flush_len >= 2){
-                               flush_len -= 2;
+                       while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
                                i = MMC_BUFFER_ACCESS;
                                stat = MMC_STATUS;
                                stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
@@ -746,10 +750,6 @@ static void imxmci_tasklet_fnc(unsigned long data)
                        data_dir_mask = STATUS_DATA_TRANS_DONE;
                }
 
-               imxmci_busy_wait_for_status(host, &stat,
-                               data_dir_mask,
-                               50, "imxmci_tasklet_fnc data");
-
                if(stat & data_dir_mask) {
                        clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
                        imxmci_data_done(host, stat);
@@ -865,7 +865,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
                imxmci_stop_clock(host);
                MMC_CLK_RATE = (prescaler<<3) | clk;
-               imxmci_start_clock(host);
+       /*
+        * As far as I understand, the clock should not be started here, because
+        * that would kick off the SDHC sequencer and send the last or a random
+        * command to the card.
+        */
+               /*imxmci_start_clock(host);*/
 
                dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
        } else {
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1ca2c8b9c9b552b488702753bfac8d5309a625c8..6201f3086a0226d1810aab52067d0a33e22206fe 100644
@@ -951,6 +951,7 @@ static void mmc_read_scrs(struct mmc_host *host)
                data.timeout_ns = card->csd.tacc_ns * 10;
                data.timeout_clks = card->csd.tacc_clks * 10;
                data.blksz_bits = 3;
+               data.blksz = 1 << 3;
                data.blocks = 1;
                data.flags = MMC_DATA_READ;
                data.sg = &sg;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 06bd1f4cb9b1e34c71255ef2c2f6711d0dbbbfc1..e39cc05c64c258536cae1ece7c88d24969e036cb 100644
@@ -175,6 +175,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                brq.data.timeout_ns = card->csd.tacc_ns * 10;
                brq.data.timeout_clks = card->csd.tacc_clks * 10;
                brq.data.blksz_bits = md->block_bits;
+               brq.data.blksz = 1 << md->block_bits;
                brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index f97b472085cbf2c39f3eb99b0b4fd38460aa2c8d..b49368fd96b82b051d59fe85ec00c9127ca05390 100644
@@ -119,7 +119,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
                nob = 0xffff;
 
        writel(nob, host->base + MMC_NOB);
-       writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
+       writel(data->blksz, host->base + MMC_BLKLEN);
 
        clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
        do_div(clks, 1000000000UL);
@@ -283,7 +283,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
         * data blocks as being in error.
         */
        if (data->error == MMC_ERR_NONE)
-               data->bytes_xfered = data->blocks << data->blksz_bits;
+               data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;
 
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 39b3d97f891ecf007185170121d0abda316e4a66..8167332d4013d3e7701f108d2aea0dcd241ea700 100644
@@ -662,14 +662,14 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
        unsigned long dmaflags;
 
        DBGF("blksz %04x blks %04x flags %08x\n",
-               1 << data->blksz_bits, data->blocks, data->flags);
+               data->blksz, data->blocks, data->flags);
        DBGF("tsac %d ms nsac %d clk\n",
                data->timeout_ns / 1000000, data->timeout_clks);
 
        /*
         * Calculate size.
         */
-       host->size = data->blocks << data->blksz_bits;
+       host->size = data->blocks * data->blksz;
 
        /*
         * Check timeout values for overflow.
@@ -696,12 +696,12 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
         * Two bytes are needed for each data line.
         */
        if (host->bus_width == MMC_BUS_WIDTH_1) {
-               blksize = (1 << data->blksz_bits) + 2;
+               blksize = data->blksz + 2;
 
                wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
                wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
        } else if (host->bus_width == MMC_BUS_WIDTH_4) {
-               blksize = (1 << data->blksz_bits) + 2 * 4;
+               blksize = data->blksz + 2 * 4;
 
                wbsd_write_index(host, WBSD_IDX_PBSMSB,
                        ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 3d306681919e12bdc923ae95c1e282faa724dbad..d8233e0b789952c9047e2b7158e0f291b9efe772 100644
@@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
-       if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+       if (dma_mapping_error(mapping) ||
+               mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                /* Sigh... */
-               pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+               if (!dma_mapping_error(mapping))
+                       pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
                if (skb == NULL)
@@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
-               if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
-                       pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+               if (dma_mapping_error(mapping) ||
+                       mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+                       if (!dma_mapping_error(mapping))
+                               pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
@@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-       if (mapping + len > B44_DMA_MASK) {
+       if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
-               pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+               if (!dma_mapping_error(mapping))
+                       pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
 
                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
@@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
-               if (mapping + len > B44_DMA_MASK) {
-                       pci_unmap_single(bp->pdev, mapping,
+               if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
+                       if (!dma_mapping_error(mapping))
+                               pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
@@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp)
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);
 
-               if (rx_ring_dma + size > B44_DMA_MASK) {
+               if (dma_mapping_error(rx_ring_dma) ||
+                       rx_ring_dma + size > B44_DMA_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }
@@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp)
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);
 
-               if (tx_ring_dma + size > B44_DMA_MASK) {
+               if (dma_mapping_error(tx_ring_dma) ||
+                       tx_ring_dma + size > B44_DMA_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }
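
The b44 hunks above all apply the same pattern: check dma_mapping_error()
before comparing the address against the chip's 1 GB DMA limit, and only
unmap when the mapping actually succeeded (in this kernel the helper takes
just the address). Sketched once:

    dma_addr_t mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
    if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
            if (!dma_mapping_error(mapping))
                    pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);
            /* fall back to a bounce buffer allocated with GFP_DMA */
    }
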
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1ddefd281213d613462f05a583795921d5d3acd4..038447fb5c5ee82ef17110189497e029c01d42fd 100644
@@ -53,6 +53,7 @@
 #define DRV_VERSION    "v1.17b"
 #define DRV_RELDATE    "2006/03/10"
 #include "dl2k.h"
+#include <linux/dma-mapping.h>
 
 static char version[] __devinitdata =
       KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; 
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8eba51c329dd7412334823131749ab6ac8..b67f586d7392ab199e29181517d18af00cc9a8e1 100644
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
                int status;
 
                dev = nds[i];
+               if (dev == NULL)
+                       continue;
 
                status = pm3386_is_link_up(i);
                if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)
 
 static int __init enp2611_init_module(void)
 { 
+       int ports;
        int i;
 
        if (!machine_is_enp2611())
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
        caleb_reset();
        pm3386_reset();
 
-       for (i = 0; i < 3; i++) {
+       ports = pm3386_port_count();
+       for (i = 0; i < ports; i++) {
                nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
                if (nds[i] == NULL) {
                        while (--i >= 0)
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)
 
        ixp2400_msf_init(&enp2611_msf_parameters);
 
-       if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
-               for (i = 0; i < 3; i++)
-                       free_netdev(nds[i]);
+       if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+               for (i = 0; i < ports; i++)
+                       if (nds[i])
+                               free_netdev(nds[i]);
                return -EINVAL;
        }
 
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab7564053af12a291f2be7875329cb6ac6e1d..5224651c9aac30e3ab308286c7d01f93c987ee79 100644
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
        pm3386_reg_write(port >> 1, reg, value);
 }
 
+int pm3386_secondary_present(void)
+{
+       return pm3386_reg_read(1, 0) == 0x3386;
+}
 
 void pm3386_reset(void)
 {
        u8 mac[3][6];
+       int secondary;
+
+       secondary = pm3386_secondary_present();
 
        /* Save programmed MAC addresses.  */
        pm3386_get_mac(0, mac[0]);
        pm3386_get_mac(1, mac[1]);
-       pm3386_get_mac(2, mac[2]);
+       if (secondary)
+               pm3386_get_mac(2, mac[2]);
 
        /* Assert analog and digital reset.  */
        pm3386_reg_write(0, 0x002, 0x0060);
-       pm3386_reg_write(1, 0x002, 0x0060);
+       if (secondary)
+               pm3386_reg_write(1, 0x002, 0x0060);
        mdelay(1);
 
        /* Deassert analog reset.  */
        pm3386_reg_write(0, 0x002, 0x0062);
-       pm3386_reg_write(1, 0x002, 0x0062);
+       if (secondary)
+               pm3386_reg_write(1, 0x002, 0x0062);
        mdelay(10);
 
        /* Deassert digital reset.  */
        pm3386_reg_write(0, 0x002, 0x0063);
-       pm3386_reg_write(1, 0x002, 0x0063);
+       if (secondary)
+               pm3386_reg_write(1, 0x002, 0x0063);
        mdelay(10);
 
        /* Restore programmed MAC addresses.  */
        pm3386_set_mac(0, mac[0]);
        pm3386_set_mac(1, mac[1]);
-       pm3386_set_mac(2, mac[2]);
+       if (secondary)
+               pm3386_set_mac(2, mac[2]);
 
        /* Disable carrier on all ports.  */
        pm3386_set_carrier(0, 0);
        pm3386_set_carrier(1, 0);
-       pm3386_set_carrier(2, 0);
+       if (secondary)
+               pm3386_set_carrier(2, 0);
 }
 
 static u16 swaph(u16 x)
@@ -127,6 +140,11 @@ static u16 swaph(u16 x)
        return ((x << 8) | (x >> 8)) & 0xffff;
 }
 
+int pm3386_port_count(void)
+{
+       return 2 + pm3386_secondary_present();
+}
+
 void pm3386_init_port(int port)
 {
        int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb056ac424c9279ca8501016d2f3452187c3..cc4183dca9116959be453b1354065719c850fe4a 100644
@@ -13,6 +13,7 @@
 #define __PM3386_H
 
 void pm3386_reset(void);
+int pm3386_port_count(void);
 void pm3386_init_port(int port);
 void pm3386_get_mac(int port, u8 *mac);
 void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ffd267fab21da998ed0a349716ba4594706b9aec..62be6d99d05cd3623e1d0df74cff1495adc9b47c 100644
@@ -1020,8 +1020,19 @@ static int sky2_up(struct net_device *dev)
        struct sky2_hw *hw = sky2->hw;
        unsigned port = sky2->port;
        u32 ramsize, rxspace, imask;
-       int err = -ENOMEM;
+       int err;
+       struct net_device *otherdev = hw->dev[sky2->port^1];
 
+       /* Block bringing up both ports at the same time on a dual port card.
+        * There is an unfixed bug where the receiver gets confused and picks up
+        * packets out of order. Until this is fixed, prevent data corruption.
+        */
+       if (otherdev && netif_running(otherdev)) {
+               printk(KERN_INFO PFX "dual port support is disabled.\n");
+               return -EBUSY;
+       }
+
+       err = -ENOMEM;
        if (netif_msg_ifup(sky2))
                printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 19e2b174d33cc777bf38d506bc82f96121d1fb1b..d378478612fb76d862b4b584aff1cc14b190f228 100644
@@ -634,6 +634,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4,     quirk_vi
  * non-x86 architectures (yes Via exists on PPC among other places),
  * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
  * interrupts delivered properly.
+ *
+ * Some of the on-chip devices are actually '586 devices' so they are
+ * listed here.
  */
 static void quirk_via_irq(struct pci_dev *dev)
 {
@@ -648,6 +651,10 @@ static void quirk_via_irq(struct pci_dev *dev)
                pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
        }
 }
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq);
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);
@@ -895,6 +902,7 @@ static void __init k8t_sound_hostbridge(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
 
+#ifndef CONFIG_ACPI_SLEEP
 /*
  * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
  * is not activated. The myth is that Asus said that they do not want the
@@ -906,8 +914,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho
  * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it 
  * becomes necessary to do this tweak in two steps -- I've chosen the Host
  * bridge as trigger.
+ *
+ * Actually, leaving it unhidden and not redoing the quirk over suspend2ram
+ * will cause thermal management to break down, causing the machine to
+ * overheat.
  */
-static int __initdata asus_hides_smbus = 0;
+static int __initdata asus_hides_smbus;
 
 static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
 {
@@ -1050,6 +1062,8 @@ static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_ICH6_1,     asus_hides_smbus_lpc_ich6 );
 
+#endif
+
 /*
  * SiS 96x south bridge: BIOS typically hides SMBus device...
  */
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index c53db7ceda5e2ab12770964dc18216fa819ab4c8..738b1ef595a3506fd595f9f742da4c6f966b7e47 100644
@@ -426,7 +426,7 @@ static int ds_open(struct inode *inode, struct file *file)
 
     if (!warning_printed) {
            printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
-                       "usage.\n");
+                       "usage from process: %s.\n", current->comm);
            printk(KERN_INFO "pcmcia: This interface will soon be removed from "
                        "the kernel; please expect breakage unless you upgrade "
                        "to new tools.\n");
@@ -601,8 +601,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
            ret = CS_BAD_ARGS;
        else {
            struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
-           ret = pccard_get_configuration_info(s, p_dev, &buf->config);
-           pcmcia_put_dev(p_dev);
+           if (p_dev == NULL)
+                   ret = CS_BAD_ARGS;
+           else {
+                   ret = pccard_get_configuration_info(s, p_dev, &buf->config);
+                   pcmcia_put_dev(p_dev);
+           }
        }
        break;
     case DS_GET_FIRST_TUPLE:
@@ -632,8 +636,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
                    ret = CS_BAD_ARGS;
            else {
                    struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
-                   ret = pccard_get_status(s, p_dev, &buf->status);
-                   pcmcia_put_dev(p_dev);
+                   if (p_dev == NULL)
+                           ret = CS_BAD_ARGS;
+                   else {
+                           ret = pccard_get_status(s, p_dev, &buf->status);
+                           pcmcia_put_dev(p_dev);
+                   }
            }
            break;
     case DS_VALIDATE_CIS:
@@ -665,9 +673,10 @@ static int ds_ioctl(struct inode * inode, struct file * file,
        if (!(buf->conf_reg.Function &&
             (buf->conf_reg.Function >= s->functions))) {
                struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
-               if (p_dev)
+               if (p_dev) {
                        ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
-               pcmcia_put_dev(p_dev);
+                       pcmcia_put_dev(p_dev);
+               }
        }
        break;
     case DS_GET_FIRST_REGION:
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 5d6b7a57b02f16e81bb1a343480e64e3ff464d5d..e65da921a827da68fb941b1f90aa4025e666636e 100644
@@ -1348,7 +1348,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
                index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 
                        - channel->ccws;
                if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
-                   (irb->scsw.cstat | SCHN_STAT_PCI))
+                   (irb->scsw.cstat & SCHN_STAT_PCI))
                        /* Bloody io subsystem tells us lies about cpa... */
                        index = (index - 1) & (LCS_NUM_BUFFS - 1);
                while (channel->io_idx != index) {
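
The lcs change is the classic bitwise-OR-as-test bug: ORing a nonzero mask
into the condition makes it always true, while & actually tests the bit.
Distilled (the mask value is illustrative):

    /* assume SCHN_STAT_PCI is a single status bit, e.g. 0x80 */
    if (irb->scsw.cstat | SCHN_STAT_PCI)   /* always true       */
    if (irb->scsw.cstat & SCHN_STAT_PCI)   /* true iff bit set  */
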
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 9051b6821c1c790d028925930b214d0ae3a538ad..00881226f8dd789624a955c5c910f018f8ce8e6c 100644
@@ -875,6 +875,9 @@ static unsigned int ata_id_xfermask(const u16 *id)
 /**
  *     ata_port_queue_task - Queue port_task
  *     @ap: The ata_port to queue port_task for
+ *     @fn: workqueue function to be scheduled
+ *     @data: data value to pass to workqueue function
+ *     @delay: delay time for workqueue function
  *
  *     Schedule @fn(@data) for execution after @delay jiffies using
  *     port_task.  There is one port_task per port and it's the
@@ -3091,8 +3094,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
 /**
  *     ata_dev_init_params - Issue INIT DEV PARAMS command
  *     @dev: Device to which command will be sent
- *     @heads: Number of heads
- *     @sectors: Number of sectors
+ *     @heads: Number of heads (taskfile parameter)
+ *     @sectors: Number of sectors (taskfile parameter)
  *
  *     LOCKING:
  *     Kernel thread context (may sleep)
@@ -5007,6 +5010,7 @@ int ata_device_resume(struct ata_device *dev)
 /**
  *     ata_device_suspend - prepare a device for suspend
  *     @dev: the device to suspend
+ *     @state: target power management state
  *
  *     Flush the cache on the drive, if appropriate, then issue a
  *     standbynow command.
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index e6d141dd0385b20d3f3fb54913d98e37e0987790..bfe817fc752001c180117ef96314d709f1bff4f9 100644
@@ -37,7 +37,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "sata_mv"
-#define DRV_VERSION    "0.6"
+#define DRV_VERSION    "0.7"
 
 enum {
        /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -50,6 +50,12 @@ enum {
 
        MV_PCI_REG_BASE         = 0,
        MV_IRQ_COAL_REG_BASE    = 0x18000,      /* 6xxx part only */
+       MV_IRQ_COAL_CAUSE               = (MV_IRQ_COAL_REG_BASE + 0x08),
+       MV_IRQ_COAL_CAUSE_LO            = (MV_IRQ_COAL_REG_BASE + 0x88),
+       MV_IRQ_COAL_CAUSE_HI            = (MV_IRQ_COAL_REG_BASE + 0x8c),
+       MV_IRQ_COAL_THRESHOLD           = (MV_IRQ_COAL_REG_BASE + 0xcc),
+       MV_IRQ_COAL_TIME_THRESHOLD      = (MV_IRQ_COAL_REG_BASE + 0xd0),
+
        MV_SATAHC0_REG_BASE     = 0x20000,
        MV_FLASH_CTL            = 0x1046c,
        MV_GPIO_PORT_CTL        = 0x104f0,
@@ -302,9 +308,6 @@ struct mv_port_priv {
        dma_addr_t              crpb_dma;
        struct mv_sg            *sg_tbl;
        dma_addr_t              sg_tbl_dma;
-
-       unsigned                req_producer;           /* cp of req_in_ptr */
-       unsigned                rsp_consumer;           /* cp of rsp_out_ptr */
        u32                     pp_flags;
 };
 
@@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap)
        writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
-       pp->req_producer = pp->rsp_consumer = 0;
-
        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
@@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
        }
 }
 
-static inline unsigned mv_inc_q_index(unsigned *index)
+static inline unsigned mv_inc_q_index(unsigned index)
 {
-       *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
-       return *index;
+       return (index + 1) & MV_MAX_Q_DEPTH_MASK;
 }
 
 static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
 {
-       *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+       u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                (last ? CRQB_CMD_LAST : 0);
+       *cmdw = cpu_to_le16(tmp);
 }
 
 /**
@@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
        u16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;
+       unsigned in_index;
 
        if (ATA_PROT_DMA != qc->tf.protocol)
                return;
 
-       /* the req producer index should be the same as we remember it */
-       WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-               pp->req_producer);
-
        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
 
-       pp->crqb[pp->req_producer].sg_addr =
+       /* get current queue index from hardware */
+       in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+                       >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+       pp->crqb[in_index].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
-       pp->crqb[pp->req_producer].sg_addr_hi =
+       pp->crqb[in_index].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
-       pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
+       pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 
-       cw = &pp->crqb[pp->req_producer].ata_cmd[0];
+       cw = &pp->crqb[in_index].ata_cmd[0];
        tf = &qc->tf;
 
        /* Sadly, the CRQB cannot accomodate all registers--there are
@@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
        struct mv_port_priv *pp = ap->private_data;
        struct mv_crqb_iie *crqb;
        struct ata_taskfile *tf;
+       unsigned in_index;
        u32 flags = 0;
 
        if (ATA_PROT_DMA != qc->tf.protocol)
                return;
 
-       /* the req producer index should be the same as we remember it */
-       WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-               pp->req_producer);
-
        /* Fill in Gen IIE command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
 
-       crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
+       /* get current queue index from hardware */
+       in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
+                       >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+       crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
        crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        crqb->flags = cpu_to_le32(flags);
@@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 {
        void __iomem *port_mmio = mv_ap_base(qc->ap);
        struct mv_port_priv *pp = qc->ap->private_data;
+       unsigned in_index;
        u32 in_ptr;
 
        if (ATA_PROT_DMA != qc->tf.protocol) {
@@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
                return ata_qc_issue_prot(qc);
        }
 
-       in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+       in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+       in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-       /* the req producer index should be the same as we remember it */
-       WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-               pp->req_producer);
        /* until we do queuing, the queue should be empty at this point */
-       WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-               ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
-                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+       WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+               >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
-       mv_inc_q_index(&pp->req_producer);      /* now incr producer index */
+       in_index = mv_inc_q_index(in_index);    /* now incr producer index */
 
        mv_start_dma(port_mmio, pp);
 
        /* and write the request in pointer to kick the EDMA to life */
        in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
-       in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
+       in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
        writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
        return 0;
@@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 {
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
+       unsigned out_index;
        u32 out_ptr;
        u8 ata_status;
 
-       out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+       out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+       out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-       /* the response consumer index should be the same as we remember it */
-       WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-               pp->rsp_consumer);
-
-       ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
+       ata_status = le16_to_cpu(pp->crpb[out_index].flags)
+                                       >> CRPB_FLAG_STATUS_SHIFT;
 
        /* increment our consumer index... */
-       pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
+       out_index = mv_inc_q_index(out_index);
 
        /* and, until we do NCQ, there should only be 1 CRPB waiting */
-       WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
-                 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
-               pp->rsp_consumer);
+       WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+               >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
        /* write out our inc'd consumer index so EDMA knows we're caught up */
        out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
-       out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
+       out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
        writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
        /* Return ATA status register for completed CRPB */
@@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 /**
  *      mv_err_intr - Handle error interrupts on the port
  *      @ap: ATA channel to manipulate
+ *      @reset_allowed: bool: 0 == don't trigger from reset here
  *
  *      In most cases, just clear the interrupt and move on.  However,
  *      some cases require an eDMA reset, which is done right before
@@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_err_intr(struct ata_port *ap)
+static void mv_err_intr(struct ata_port *ap, int reset_allowed)
 {
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 edma_err_cause, serr = 0;
@@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap)
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
        /* check for fatal here and recover if needed */
-       if (EDMA_ERR_FATAL & edma_err_cause) {
+       if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
                mv_stop_and_reset(ap);
-       }
 }
 
 /**
@@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
                struct ata_port *ap = host_set->ports[port];
                struct mv_port_priv *pp = ap->private_data;
 
-               hard_port = port & MV_PORT_MASK;        /* range 0-3 */
+               hard_port = mv_hardport_from_port(port); /* range 0..3 */
                handled = 0;    /* ensure ata_status is set if handled++ */
 
                /* Note that DEV_IRQ might happen spuriously during EDMA,
-                * and should be ignored in such cases.  We could mask it,
-                * but it's pretty rare and may not be worth the overhead.
+                * and should be ignored in such cases.
+                * The cause of this is still under investigation.
                 */ 
                if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                        /* EDMA: check for response queue interrupt */
@@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
                                ata_status = readb((void __iomem *)
                                           ap->ioaddr.status_addr);
                                handled = 1;
+                               /* ignore spurious intr if drive still BUSY */
+                               if (ata_status & ATA_BUSY) {
+                                       ata_status = 0;
+                                       handled = 0;
+                               }
                        }
                }
 
@@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
                }
                if ((PORT0_ERR << shift) & relevant) {
-                       mv_err_intr(ap);
+                       mv_err_intr(ap, 1);
                        err_mask |= AC_ERR_OTHER;
                        handled = 1;
                }
@@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
        struct ata_host_set *host_set = dev_instance;
        unsigned int hc, handled = 0, n_hcs;
        void __iomem *mmio = host_set->mmio_base;
+       struct mv_host_priv *hpriv;
        u32 irq_stat;
 
        irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
                        handled++;
                }
        }
+
+       hpriv = host_set->private_data;
+       if (IS_60XX(hpriv)) {
+               /* deal with the interrupt coalescing bits */
+               if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
+                       writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
+                       writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
+                       writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
+               }
+       }
+
        if (PCI_ERR & irq_stat) {
                printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
                       readl(mmio + PCI_IRQ_CAUSE_OFS));
@@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 
        if (IS_60XX(hpriv)) {
                u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-               ifctl |= (1 << 12) | (1 << 7);
+               ifctl |= (1 << 7);              /* enable gen2i speed */
+               ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
        }
 
@@ -2033,11 +2048,14 @@ static void mv_eng_timeout(struct ata_port *ap)
               ap->host_set->mmio_base, ap, qc, qc->scsicmd,
               &qc->scsicmd->cmnd);
 
-       mv_err_intr(ap);
+       mv_err_intr(ap, 0);
        mv_stop_and_reset(ap);
 
-       qc->err_mask |= AC_ERR_TIMEOUT;
-       ata_eh_qc_complete(qc);
+       WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+       if (qc->flags & ATA_QCFLAG_ACTIVE) {
+               qc->err_mask |= AC_ERR_TIMEOUT;
+               ata_eh_qc_complete(qc);
+       }
 }
 
 /**
@@ -2231,7 +2249,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
                        void __iomem *port_mmio = mv_port_base(mmio, port);
 
                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-                       ifctl |= (1 << 12);
+                       ifctl |= (1 << 7);              /* enable gen2i speed */
+                       ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
                }
 
@@ -2332,6 +2351,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc) {
                return rc;
        }
+       pci_set_master(pdev);
 
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
index aeb8153ccf2466eae358d0610af4a61300ce4c8d..17839e753e4cfb1fb550832d8613b0ec579c83ef 100644 (file)
@@ -1907,9 +1907,12 @@ uart_set_options(struct uart_port *port, struct console *co,
 static void uart_change_pm(struct uart_state *state, int pm_state)
 {
        struct uart_port *port = state->port;
-       if (port->ops->pm)
-               port->ops->pm(port, pm_state, state->pm_state);
-       state->pm_state = pm_state;
+
+       if (state->pm_state != pm_state) {
+               if (port->ops->pm)
+                       port->ops->pm(port, pm_state, state->pm_state);
+               state->pm_state = pm_state;
+       }
 }
 
 int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
index 7a75faeb0526d6f89c37dbe854c52b5cddd5ebeb..9ce1d01469b19d3c93e61193bd8bce400b9206a8 100644 (file)
@@ -75,6 +75,14 @@ config SPI_BUTTERFLY
          inexpensive battery powered microcontroller evaluation board.
          This same cable can be used to flash new firmware.
 
+config SPI_PXA2XX
+       tristate "PXA2xx SSP SPI master"
+       depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL
+       help
+         This enables using a PXA2xx SSP port as a SPI master controller.
+         The driver can be configured to use any SSP port and additional
+         documentation can be found in Documentation/spi/pxa2xx.
+
 #
 # Add new SPI master controllers in alphabetical order above this line
 #
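
For boards that enable SPI_PXA2XX, the controller binds to a platform device whose platform_data is a struct pxa2xx_spi_master (declared in the new include/asm-arm/arch-pxa/pxa2xx_spi.h). A minimal board-file sketch follows; it is not part of this patch, and the SSP1 register base (0x41000000), IRQ_SSP, and the CKEN3_SSP clock-enable value are assumptions about the PXA25x layout rather than something this diff guarantees:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <asm/irq.h>
	#include <asm/arch/pxa2xx_spi.h>

	static struct pxa2xx_spi_master board_ssp1_master_info = {
		.ssp_type	= PXA25x_SSP,	/* which SSP cell backs this bus */
		.clock_enable	= CKEN3_SSP,	/* handed to pxa_set_cken() (assumed) */
		.num_chipselect	= 1,
		.enable_dma	= 1,
	};

	static struct resource board_ssp1_resources[] = {
		{
			.start	= 0x41000000,		/* SSP1 registers (assumed) */
			.end	= 0x41000000 + 0x2c,	/* through SSPSP */
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= IRQ_SSP,		/* SSP1 interrupt (assumed) */
			.end	= IRQ_SSP,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device board_ssp1_device = {
		.name		= "pxa2xx-spi",		/* matches the driver name */
		.id		= 1,			/* becomes master->bus_num */
		.resource	= board_ssp1_resources,
		.num_resources	= ARRAY_SIZE(board_ssp1_resources),
		.dev		= {
			.platform_data	= &board_ssp1_master_info,
		},
	};

	static int __init board_spi_init(void)
	{
		return platform_device_register(&board_ssp1_device);
	}
	arch_initcall(board_spi_init);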
index c2c87e845abf8d71f3742f29fc16b664d2493d09..1bca5f95de251d00c4775a8e3898da2751b29102 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_MASTER)              += spi.o
 # SPI master controller drivers (bus)
 obj-$(CONFIG_SPI_BITBANG)              += spi_bitbang.o
 obj-$(CONFIG_SPI_BUTTERFLY)            += spi_butterfly.o
+obj-$(CONFIG_SPI_PXA2XX)               += pxa2xx_spi.o
 #      ... add above this line ...
 
 # SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
new file mode 100644 (file)
index 0000000..596bf82
--- /dev/null
@@ -0,0 +1,1467 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/delay.h>
+#include <asm/dma.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/pxa2xx_spi.h>
+
+MODULE_AUTHOR("Stephen Street");
+MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
+MODULE_LICENSE("GPL");
+
+#define MAX_BUSES 3
+
+#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
+#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
+#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+
+#define START_STATE ((void*)0)
+#define RUNNING_STATE ((void*)1)
+#define DONE_STATE ((void*)2)
+#define ERROR_STATE ((void*)-1)
+
+#define QUEUE_RUNNING 0
+#define QUEUE_STOPPED 1
+
+struct driver_data {
+       /* Driver model hookup */
+       struct platform_device *pdev;
+
+       /* SPI framework hookup */
+       enum pxa_ssp_type ssp_type;
+       struct spi_master *master;
+
+       /* PXA hookup */
+       struct pxa2xx_spi_master *master_info;
+
+       /* DMA setup stuff */
+       int rx_channel;
+       int tx_channel;
+       u32 *null_dma_buf;
+
+       /* SSP register addresses */
+       void *ioaddr;
+       u32 ssdr_physical;
+
+       /* SSP masks */
+       u32 dma_cr1;
+       u32 int_cr1;
+       u32 clear_sr;
+       u32 mask_sr;
+
+       /* Driver message queue */
+       struct workqueue_struct *workqueue;
+       struct work_struct pump_messages;
+       spinlock_t lock;
+       struct list_head queue;
+       int busy;
+       int run;
+
+       /* Message Transfer pump */
+       struct tasklet_struct pump_transfers;
+
+       /* Current message transfer state info */
+       struct spi_message* cur_msg;
+       struct spi_transfer* cur_transfer;
+       struct chip_data *cur_chip;
+       size_t len;
+       void *tx;
+       void *tx_end;
+       void *rx;
+       void *rx_end;
+       int dma_mapped;
+       dma_addr_t rx_dma;
+       dma_addr_t tx_dma;
+       size_t rx_map_len;
+       size_t tx_map_len;
+       u8 n_bytes;
+       u32 dma_width;
+       int cs_change;
+       void (*write)(struct driver_data *drv_data);
+       void (*read)(struct driver_data *drv_data);
+       irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+       void (*cs_control)(u32 command);
+};
+
+struct chip_data {
+       u32 cr0;
+       u32 cr1;
+       u32 to;
+       u32 psp;
+       u32 timeout;
+       u8 n_bytes;
+       u32 dma_width;
+       u32 dma_burst_size;
+       u32 threshold;
+       u32 dma_threshold;
+       u8 enable_dma;
+       u8 bits_per_word;
+       u32 speed_hz;
+       void (*write)(struct driver_data *drv_data);
+       void (*read)(struct driver_data *drv_data);
+       void (*cs_control)(u32 command);
+};
+
+static void pump_messages(void *data);
+
+static int flush(struct driver_data *drv_data)
+{
+       unsigned long limit = loops_per_jiffy << 1;
+
+       void *reg = drv_data->ioaddr;
+
+       do {
+               while (read_SSSR(reg) & SSSR_RNE) {
+                       read_SSDR(reg);
+               }
+       } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
+       write_SSSR(SSSR_ROR, reg);
+
+       return limit;
+}
+
+static void restore_state(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       /* Clear status and disable clock */
+       write_SSSR(drv_data->clear_sr, reg);
+       write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
+
+       /* Load the registers */
+       write_SSCR1(drv_data->cur_chip->cr1, reg);
+       write_SSCR0(drv_data->cur_chip->cr0, reg);
+       if (drv_data->ssp_type != PXA25x_SSP) {
+               write_SSTO(0, reg);
+               write_SSPSP(drv_data->cur_chip->psp, reg);
+       }
+}
+
+static void null_cs_control(u32 command)
+{
+}
+
+static void null_writer(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+       u8 n_bytes = drv_data->n_bytes;
+
+       while ((read_SSSR(reg) & SSSR_TNF)
+                       && (drv_data->tx < drv_data->tx_end)) {
+               write_SSDR(0, reg);
+               drv_data->tx += n_bytes;
+       }
+}
+
+static void null_reader(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+       u8 n_bytes = drv_data->n_bytes;
+
+       while ((read_SSSR(reg) & SSSR_RNE)
+                       && (drv_data->rx < drv_data->rx_end)) {
+               read_SSDR(reg);
+               drv_data->rx += n_bytes;
+       }
+}
+
+static void u8_writer(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       while ((read_SSSR(reg) & SSSR_TNF)
+                       && (drv_data->tx < drv_data->tx_end)) {
+               write_SSDR(*(u8 *)(drv_data->tx), reg);
+               ++drv_data->tx;
+       }
+}
+
+static void u8_reader(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       while ((read_SSSR(reg) & SSSR_RNE)
+                       && (drv_data->rx < drv_data->rx_end)) {
+               *(u8 *)(drv_data->rx) = read_SSDR(reg);
+               ++drv_data->rx;
+       }
+}
+
+static void u16_writer(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       while ((read_SSSR(reg) & SSSR_TNF)
+                       && (drv_data->tx < drv_data->tx_end)) {
+               write_SSDR(*(u16 *)(drv_data->tx), reg);
+               drv_data->tx += 2;
+       }
+}
+
+static void u16_reader(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       while ((read_SSSR(reg) & SSSR_RNE)
+                       && (drv_data->rx < drv_data->rx_end)) {
+               *(u16 *)(drv_data->rx) = read_SSDR(reg);
+               drv_data->rx += 2;
+       }
+}
+
+static void u32_writer(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       while ((read_SSSR(reg) & SSSR_TNF)
+                       && (drv_data->tx < drv_data->tx_end)) {
+               write_SSDR(*(u32 *)(drv_data->tx), reg);
+               drv_data->tx += 4;
+       }
+}
+
+static void u32_reader(struct driver_data *drv_data)
+{
+       void *reg = drv_data->ioaddr;
+
+       while ((read_SSSR(reg) & SSSR_RNE)
+                       && (drv_data->rx < drv_data->rx_end)) {
+               *(u32 *)(drv_data->rx) = read_SSDR(reg);
+               drv_data->rx += 4;
+       }
+}
+
+static void *next_transfer(struct driver_data *drv_data)
+{
+       struct spi_message *msg = drv_data->cur_msg;
+       struct spi_transfer *trans = drv_data->cur_transfer;
+
+       /* Move to next transfer */
+       if (trans->transfer_list.next != &msg->transfers) {
+               drv_data->cur_transfer =
+                       list_entry(trans->transfer_list.next,
+                                       struct spi_transfer,
+                                       transfer_list);
+               return RUNNING_STATE;
+       } else
+               return DONE_STATE;
+}
+
+static int map_dma_buffers(struct driver_data *drv_data)
+{
+       struct spi_message *msg = drv_data->cur_msg;
+       struct device *dev = &msg->spi->dev;
+
+       if (!drv_data->cur_chip->enable_dma)
+               return 0;
+
+       if (msg->is_dma_mapped)
+               return  drv_data->rx_dma && drv_data->tx_dma;
+
+       if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
+               return 0;
+
+       /* Modify setup if rx buffer is null */
+       if (drv_data->rx == NULL) {
+               *drv_data->null_dma_buf = 0;
+               drv_data->rx = drv_data->null_dma_buf;
+               drv_data->rx_map_len = 4;
+       } else
+               drv_data->rx_map_len = drv_data->len;
+
+       /* Modify setup if tx buffer is null */
+       if (drv_data->tx == NULL) {
+               *drv_data->null_dma_buf = 0;
+               drv_data->tx = drv_data->null_dma_buf;
+               drv_data->tx_map_len = 4;
+       } else
+               drv_data->tx_map_len = drv_data->len;
+
+       /* Stream map the rx buffer */
+       drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
+                                               drv_data->rx_map_len,
+                                               DMA_FROM_DEVICE);
+       if (dma_mapping_error(drv_data->rx_dma))
+               return 0;
+
+       /* Stream map the tx buffer */
+       drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
+                                               drv_data->tx_map_len,
+                                               DMA_TO_DEVICE);
+
+       if (dma_mapping_error(drv_data->tx_dma)) {
+               dma_unmap_single(dev, drv_data->rx_dma,
+                                       drv_data->rx_map_len, DMA_FROM_DEVICE);
+               return 0;
+       }
+
+       return 1;
+}
+
+static void unmap_dma_buffers(struct driver_data *drv_data)
+{
+       struct device *dev;
+
+       if (!drv_data->dma_mapped)
+               return;
+
+       if (!drv_data->cur_msg->is_dma_mapped) {
+               dev = &drv_data->cur_msg->spi->dev;
+               dma_unmap_single(dev, drv_data->rx_dma,
+                                       drv_data->rx_map_len, DMA_FROM_DEVICE);
+               dma_unmap_single(dev, drv_data->tx_dma,
+                                       drv_data->tx_map_len, DMA_TO_DEVICE);
+       }
+
+       drv_data->dma_mapped = 0;
+}
+
+/* caller already set message->status; dma and pio irqs are blocked */
+static void giveback(struct spi_message *message, struct driver_data *drv_data)
+{
+       struct spi_transfer* last_transfer;
+
+       last_transfer = list_entry(message->transfers.prev,
+                                       struct spi_transfer,
+                                       transfer_list);
+
+       if (!last_transfer->cs_change)
+               drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+       message->state = NULL;
+       if (message->complete)
+               message->complete(message->context);
+
+       drv_data->cur_msg = NULL;
+       drv_data->cur_transfer = NULL;
+       drv_data->cur_chip = NULL;
+       queue_work(drv_data->workqueue, &drv_data->pump_messages);
+}
+
+static int wait_ssp_rx_stall(void *ioaddr)
+{
+       unsigned long limit = loops_per_jiffy << 1;
+
+       while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
+               cpu_relax();
+
+       return limit;
+}
+
+static int wait_dma_channel_stop(int channel)
+{
+       unsigned long limit = loops_per_jiffy << 1;
+
+       while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
+               cpu_relax();
+
+       return limit;
+}
+
+static void dma_handler(int channel, void *data, struct pt_regs *regs)
+{
+       struct driver_data *drv_data = data;
+       struct spi_message *msg = drv_data->cur_msg;
+       void *reg = drv_data->ioaddr;
+       u32 irq_status = DCSR(channel) & DMA_INT_MASK;
+       u32 trailing_sssr = 0;
+
+       if (irq_status & DCSR_BUSERR) {
+
+               /* Disable interrupts, clear status and reset DMA */
+               if (drv_data->ssp_type != PXA25x_SSP)
+                       write_SSTO(0, reg);
+               write_SSSR(drv_data->clear_sr, reg);
+               write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+               DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+               DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+
+               if (flush(drv_data) == 0)
+                       dev_err(&drv_data->pdev->dev,
+                                       "dma_handler: flush fail\n");
+
+               unmap_dma_buffers(drv_data);
+
+               if (channel == drv_data->tx_channel)
+                       dev_err(&drv_data->pdev->dev,
+                               "dma_handler: bad bus address on "
+                               "tx channel %d, source %x target = %x\n",
+                               channel, DSADR(channel), DTADR(channel));
+               else
+                       dev_err(&drv_data->pdev->dev,
+                               "dma_handler: bad bus address on "
+                               "rx channel %d, source %x target = %x\n",
+                               channel, DSADR(channel), DTADR(channel));
+
+               msg->state = ERROR_STATE;
+               tasklet_schedule(&drv_data->pump_transfers);
+       }
+
+       /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
+       if ((drv_data->ssp_type == PXA25x_SSP)
+               && (channel == drv_data->tx_channel)
+               && (irq_status & DCSR_ENDINTR)) {
+
+               /* Wait for rx to stall */
+               if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+                       dev_err(&drv_data->pdev->dev,
+                               "dma_handler: ssp rx stall failed\n");
+
+               /* Clear and disable interrupts on SSP and DMA channels */
+               write_SSSR(drv_data->clear_sr, reg);
+               write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+               DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+               DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+               if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+                       dev_err(&drv_data->pdev->dev,
+                               "dma_handler: dma rx channel stop failed\n");
+
+               unmap_dma_buffers(drv_data);
+
+               /* Calculate number of trailing bytes, read them */
+               trailing_sssr = read_SSSR(reg);
+               if ((trailing_sssr & 0xf008) != 0xf000) {
+                       drv_data->rx = drv_data->rx_end -
+                                       (((trailing_sssr >> 12) & 0x0f) + 1);
+                       drv_data->read(drv_data);
+               }
+               msg->actual_length += drv_data->len;
+
+               /* Release chip select if requested, transfer delays are
+                * handled in pump_transfers */
+               if (drv_data->cs_change)
+                       drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+               /* Move to next transfer */
+               msg->state = next_transfer(drv_data);
+
+               /* Schedule transfer tasklet */
+               tasklet_schedule(&drv_data->pump_transfers);
+       }
+}
+
+static irqreturn_t dma_transfer(struct driver_data *drv_data)
+{
+       u32 irq_status;
+       u32 trailing_sssr = 0;
+       struct spi_message *msg = drv_data->cur_msg;
+       void *reg = drv_data->ioaddr;
+
+       irq_status = read_SSSR(reg) & drv_data->mask_sr;
+       if (irq_status & SSSR_ROR) {
+               /* Clear and disable interrupts on SSP and DMA channels */
+               if (drv_data->ssp_type != PXA25x_SSP)
+                       write_SSTO(0, reg);
+               write_SSSR(drv_data->clear_sr, reg);
+               write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+               DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+               DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+               unmap_dma_buffers(drv_data);
+
+               if (flush(drv_data) == 0)
+                       dev_err(&drv_data->pdev->dev,
+                                       "dma_transfer: flush fail\n");
+
+               dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overrun\n");
+
+               drv_data->cur_msg->state = ERROR_STATE;
+               tasklet_schedule(&drv_data->pump_transfers);
+
+               return IRQ_HANDLED;
+       }
+
+       /* Check for false positive timeout */
+       if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) {
+               write_SSSR(SSSR_TINT, reg);
+               return IRQ_HANDLED;
+       }
+
+       if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
+
+               /* Clear and disable interrupts on SSP and DMA channels */
+               if (drv_data->ssp_type != PXA25x_SSP)
+                       write_SSTO(0, reg);
+               write_SSSR(drv_data->clear_sr, reg);
+               write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+               DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+               DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+
+               if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+                       dev_err(&drv_data->pdev->dev,
+                               "dma_transfer: dma rx channel stop failed\n");
+
+               if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+                       dev_err(&drv_data->pdev->dev,
+                               "dma_transfer: ssp rx stall failed\n");
+
+               unmap_dma_buffers(drv_data);
+
+               /* Calculate number of trailing bytes, read them */
+               trailing_sssr = read_SSSR(reg);
+               if ((trailing_sssr & 0xf008) != 0xf000) {
+                       drv_data->rx = drv_data->rx_end -
+                                       (((trailing_sssr >> 12) & 0x0f) + 1);
+                       drv_data->read(drv_data);
+               }
+               msg->actual_length += drv_data->len;
+
+               /* Release chip select if requested, transfer delays are
+                * handled in pump_transfers */
+               if (drv_data->cs_change)
+                       drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+               /* Move to next transfer */
+               msg->state = next_transfer(drv_data);
+
+               /* Schedule transfer tasklet */
+               tasklet_schedule(&drv_data->pump_transfers);
+
+               return IRQ_HANDLED;
+       }
+
+       /* Oops, problem detected */
+       return IRQ_NONE;
+}
+
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+       u32 irq_status;
+       struct spi_message *msg = drv_data->cur_msg;
+       void *reg = drv_data->ioaddr;
+       irqreturn_t handled = IRQ_NONE;
+       unsigned long limit = loops_per_jiffy << 1;
+
+       while ((irq_status = (read_SSSR(reg) & drv_data->mask_sr))) {
+
+               if (irq_status & SSSR_ROR) {
+
+                       /* Clear and disable interrupts */
+                       if (drv_data->ssp_type != PXA25x_SSP)
+                               write_SSTO(0, reg);
+                       write_SSSR(drv_data->clear_sr, reg);
+                       write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+
+                       if (flush(drv_data) == 0)
+                               dev_err(&drv_data->pdev->dev,
+                                       "interrupt_transfer: flush fail\n");
+
+                       dev_warn(&drv_data->pdev->dev,
+                                       "interrupt_transfer: fifo overun\n");
+
+                       msg->state = ERROR_STATE;
+                       tasklet_schedule(&drv_data->pump_transfers);
+
+                       return IRQ_HANDLED;
+               }
+
+               /* Look for false positive timeout */
+               if ((irq_status & SSSR_TINT)
+                               && (drv_data->rx < drv_data->rx_end))
+                       write_SSSR(SSSR_TINT, reg);
+
+               /* Pump data */
+               drv_data->read(drv_data);
+               drv_data->write(drv_data);
+
+               if (drv_data->tx == drv_data->tx_end) {
+                       /* Disable tx interrupt */
+                       write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
+
+                       /* PXA25x_SSP has no timeout, read trailing bytes */
+                       if (drv_data->ssp_type == PXA25x_SSP) {
+                               while ((read_SSSR(reg) & SSSR_BSY) && limit--)
+                                       drv_data->read(drv_data);
+
+                               if (limit == 0)
+                                       dev_err(&drv_data->pdev->dev,
+                                               "interrupt_transfer: "
+                                               "trailing byte read failed\n");
+                       }
+               }
+
+               if ((irq_status & SSSR_TINT)
+                               || (drv_data->rx == drv_data->rx_end)) {
+
+                       /* Clear timeout */
+                       if (drv_data->ssp_type != PXA25x_SSP)
+                               write_SSTO(0, reg);
+                       write_SSSR(drv_data->clear_sr, reg);
+                       write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+
+                       /* Update total bytes transferred */
+                       msg->actual_length += drv_data->len;
+
+                       /* Release chip select if requested, transfer delays are
+                        * handled in pump_transfers */
+                       if (drv_data->cs_change)
+                               drv_data->cs_control(PXA2XX_CS_DEASSERT);
+
+                       /* Move to next transfer */
+                       msg->state = next_transfer(drv_data);
+
+                       /* Schedule transfer tasklet */
+                       tasklet_schedule(&drv_data->pump_transfers);
+
+                       return IRQ_HANDLED;
+               }
+
+               /* We did something */
+               handled = IRQ_HANDLED;
+       }
+
+       return handled;
+}
+
+static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct driver_data *drv_data = (struct driver_data *)dev_id;
+
+       if (!drv_data->cur_msg) {
+               dev_err(&drv_data->pdev->dev, "bad message state "
+                               "in interrupt handler\n");
+               /* Never fail */
+               return IRQ_HANDLED;
+       }
+
+       return drv_data->transfer_handler(drv_data);
+}
+
+static void pump_transfers(unsigned long data)
+{
+       struct driver_data *drv_data = (struct driver_data *)data;
+       struct spi_message *message = NULL;
+       struct spi_transfer *transfer = NULL;
+       struct spi_transfer *previous = NULL;
+       struct chip_data *chip = NULL;
+       void *reg = drv_data->ioaddr;
+       u32 clk_div = 0;
+       u8 bits = 0;
+       u32 speed = 0;
+       u32 cr0;
+
+       /* Get current state information */
+       message = drv_data->cur_msg;
+       transfer = drv_data->cur_transfer;
+       chip = drv_data->cur_chip;
+
+       /* Handle abort */
+       if (message->state == ERROR_STATE) {
+               message->status = -EIO;
+               giveback(message, drv_data);
+               return;
+       }
+
+       /* Handle end of message */
+       if (message->state == DONE_STATE) {
+               message->status = 0;
+               giveback(message, drv_data);
+               return;
+       }
+
+       /* Delay if requested at end of transfer */
+       if (message->state == RUNNING_STATE) {
+               previous = list_entry(transfer->transfer_list.prev,
+                                       struct spi_transfer,
+                                       transfer_list);
+               if (previous->delay_usecs)
+                       udelay(previous->delay_usecs);
+       }
+
+       /* Setup the transfer state based on the type of transfer */
+       if (flush(drv_data) == 0) {
+               dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
+               message->status = -EIO;
+               giveback(message, drv_data);
+               return;
+       }
+       drv_data->n_bytes = chip->n_bytes;
+       drv_data->dma_width = chip->dma_width;
+       drv_data->cs_control = chip->cs_control;
+       drv_data->tx = (void *)transfer->tx_buf;
+       drv_data->tx_end = drv_data->tx + transfer->len;
+       drv_data->rx = transfer->rx_buf;
+       drv_data->rx_end = drv_data->rx + transfer->len;
+       drv_data->rx_dma = transfer->rx_dma;
+       drv_data->tx_dma = transfer->tx_dma;
+       drv_data->len = transfer->len;
+       drv_data->write = drv_data->tx ? chip->write : null_writer;
+       drv_data->read = drv_data->rx ? chip->read : null_reader;
+       drv_data->cs_change = transfer->cs_change;
+
+       /* Change speed and bits per word on a per-transfer basis */
+       if (transfer->speed_hz || transfer->bits_per_word) {
+
+               /* Disable clock */
+               write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg);
+               cr0 = chip->cr0;
+               bits = chip->bits_per_word;
+               speed = chip->speed_hz;
+
+               if (transfer->speed_hz)
+                       speed = transfer->speed_hz;
+
+               if (transfer->bits_per_word)
+                       bits = transfer->bits_per_word;
+
+               if (reg == SSP1_VIRT)
+                       clk_div = SSP1_SerClkDiv(speed);
+               else if (reg == SSP2_VIRT)
+                       clk_div = SSP2_SerClkDiv(speed);
+               else if (reg == SSP3_VIRT)
+                       clk_div = SSP3_SerClkDiv(speed);
+
+               if (bits <= 8) {
+                       drv_data->n_bytes = 1;
+                       drv_data->dma_width = DCMD_WIDTH1;
+                       drv_data->read = drv_data->read != null_reader ?
+                                               u8_reader : null_reader;
+                       drv_data->write = drv_data->write != null_writer ?
+                                               u8_writer : null_writer;
+               } else if (bits <= 16) {
+                       drv_data->n_bytes = 2;
+                       drv_data->dma_width = DCMD_WIDTH2;
+                       drv_data->read = drv_data->read != null_reader ?
+                                               u16_reader : null_reader;
+                       drv_data->write = drv_data->write != null_writer ?
+                                               u16_writer : null_writer;
+               } else if (bits <= 32) {
+                       drv_data->n_bytes = 4;
+                       drv_data->dma_width = DCMD_WIDTH4;
+                       drv_data->read = drv_data->read != null_reader ?
+                                               u32_reader : null_reader;
+                       drv_data->write = drv_data->write != null_writer ?
+                                               u32_writer : null_writer;
+               }
+
+               cr0 = clk_div
+                       | SSCR0_Motorola
+                       | SSCR0_DataSize(bits & 0x0f)
+                       | SSCR0_SSE
+                       | (bits > 16 ? SSCR0_EDSS : 0);
+
+               /* Start it back up */
+               write_SSCR0(cr0, reg);
+       }
+
+       message->state = RUNNING_STATE;
+
+       /* Try to map dma buffer and do a dma transfer if successful */
+       if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
+
+               /* Ensure we have the correct interrupt handler */
+               drv_data->transfer_handler = dma_transfer;
+
+               /* Setup rx DMA Channel */
+               DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+               DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
+               DTADR(drv_data->rx_channel) = drv_data->rx_dma;
+               if (drv_data->rx == drv_data->null_dma_buf)
+                       /* No target address increment */
+                       DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
+                                                       | drv_data->dma_width
+                                                       | chip->dma_burst_size
+                                                       | drv_data->len;
+               else
+                       DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
+                                                       | DCMD_FLOWSRC
+                                                       | drv_data->dma_width
+                                                       | chip->dma_burst_size
+                                                       | drv_data->len;
+
+               /* Setup tx DMA Channel */
+               DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+               DSADR(drv_data->tx_channel) = drv_data->tx_dma;
+               DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
+               if (drv_data->tx == drv_data->null_dma_buf)
+                       /* No source address increment */
+                       DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
+                                                       | drv_data->dma_width
+                                                       | chip->dma_burst_size
+                                                       | drv_data->len;
+               else
+                       DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
+                                                       | DCMD_FLOWTRG
+                                                       | drv_data->dma_width
+                                                       | chip->dma_burst_size
+                                                       | drv_data->len;
+
+               /* Enable dma end irqs on SSP to detect end of transfer */
+               if (drv_data->ssp_type == PXA25x_SSP)
+                       DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+
+               /* Fix me, need to handle cs polarity */
+               drv_data->cs_control(PXA2XX_CS_ASSERT);
+
+               /* Go baby, go */
+               write_SSSR(drv_data->clear_sr, reg);
+               DCSR(drv_data->rx_channel) |= DCSR_RUN;
+               DCSR(drv_data->tx_channel) |= DCSR_RUN;
+               if (drv_data->ssp_type != PXA25x_SSP)
+                       write_SSTO(chip->timeout, reg);
+               write_SSCR1(chip->cr1
+                               | chip->dma_threshold
+                               | drv_data->dma_cr1,
+                               reg);
+       } else {
+               /* Ensure we have the correct interrupt handler */
+               drv_data->transfer_handler = interrupt_transfer;
+
+               /* Fix me, need to handle cs polarity */
+               drv_data->cs_control(PXA2XX_CS_ASSERT);
+
+               /* Go baby, go */
+               write_SSSR(drv_data->clear_sr, reg);
+               if (drv_data->ssp_type != PXA25x_SSP)
+                       write_SSTO(chip->timeout, reg);
+               write_SSCR1(chip->cr1
+                               | chip->threshold
+                               | drv_data->int_cr1,
+                               reg);
+       }
+}
+
+static void pump_messages(void *data)
+{
+       struct driver_data *drv_data = data;
+       unsigned long flags;
+
+       /* Lock queue and check for queue work */
+       spin_lock_irqsave(&drv_data->lock, flags);
+       if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
+               drv_data->busy = 0;
+               spin_unlock_irqrestore(&drv_data->lock, flags);
+               return;
+       }
+
+       /* Make sure we are not already running a message */
+       if (drv_data->cur_msg) {
+               spin_unlock_irqrestore(&drv_data->lock, flags);
+               return;
+       }
+
+       /* Extract head of queue */
+       drv_data->cur_msg = list_entry(drv_data->queue.next,
+                                       struct spi_message, queue);
+       list_del_init(&drv_data->cur_msg->queue);
+       drv_data->busy = 1;
+       spin_unlock_irqrestore(&drv_data->lock, flags);
+
+       /* Initial message state */
+       drv_data->cur_msg->state = START_STATE;
+       drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+                                               struct spi_transfer,
+                                               transfer_list);
+
+       /* Setup the SSP using the per chip configuration */
+       drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+       restore_state(drv_data);
+
+       /* Mark as busy and launch transfers */
+       tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+       struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+       unsigned long flags;
+
+       spin_lock_irqsave(&drv_data->lock, flags);
+
+       if (drv_data->run == QUEUE_STOPPED) {
+               spin_unlock_irqrestore(&drv_data->lock, flags);
+               return -ESHUTDOWN;
+       }
+
+       msg->actual_length = 0;
+       msg->status = -EINPROGRESS;
+       msg->state = START_STATE;
+
+       list_add_tail(&msg->queue, &drv_data->queue);
+
+       if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
+               queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+       spin_unlock_irqrestore(&drv_data->lock, flags);
+
+       return 0;
+}
+
+static int setup(struct spi_device *spi)
+{
+       struct pxa2xx_spi_chip *chip_info = NULL;
+       struct chip_data *chip;
+       struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+       unsigned int clk_div;
+
+       if (!spi->bits_per_word)
+               spi->bits_per_word = 8;
+
+       if (drv_data->ssp_type != PXA25x_SSP
+                       && (spi->bits_per_word < 4 || spi->bits_per_word > 32))
+               return -EINVAL;
+       else if (drv_data->ssp_type == PXA25x_SSP
+                       && (spi->bits_per_word < 4 || spi->bits_per_word > 16))
+               return -EINVAL;
+
+       /* Only alloc (or use chip_info) on first setup */
+       chip = spi_get_ctldata(spi);
+       if (chip == NULL) {
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+               if (!chip)
+                       return -ENOMEM;
+
+               chip->cs_control = null_cs_control;
+               chip->enable_dma = 0;
+               chip->timeout = 5;
+               chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
+               chip->dma_burst_size = drv_data->master_info->enable_dma ?
+                                       DCMD_BURST8 : 0;
+
+               chip_info = spi->controller_data;
+       }
+
+       /* chip_info isn't always needed */
+       if (chip_info) {
+               if (chip_info->cs_control)
+                       chip->cs_control = chip_info->cs_control;
+
+               chip->timeout = (chip_info->timeout_microsecs * 10000) / 2712;
+
+               chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold)
+                                       | SSCR1_TxTresh(chip_info->tx_threshold);
+
+               chip->enable_dma = chip_info->dma_burst_size != 0
+                                       && drv_data->master_info->enable_dma;
+               chip->dma_threshold = 0;
+
+               if (chip->enable_dma) {
+                       if (chip_info->dma_burst_size <= 8) {
+                               chip->dma_threshold = SSCR1_RxTresh(8)
+                                                       | SSCR1_TxTresh(8);
+                               chip->dma_burst_size = DCMD_BURST8;
+                       } else if (chip_info->dma_burst_size <= 16) {
+                               chip->dma_threshold = SSCR1_RxTresh(16)
+                                                       | SSCR1_TxTresh(16);
+                               chip->dma_burst_size = DCMD_BURST16;
+                       } else {
+                               chip->dma_threshold = SSCR1_RxTresh(32)
+                                                       | SSCR1_TxTresh(32);
+                               chip->dma_burst_size = DCMD_BURST32;
+                       }
+               }
+
+               if (chip_info->enable_loopback)
+                       chip->cr1 = SSCR1_LBM;
+       }
+
+       if (drv_data->ioaddr == SSP1_VIRT)
+               clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
+       else if (drv_data->ioaddr == SSP2_VIRT)
+               clk_div = SSP2_SerClkDiv(spi->max_speed_hz);
+       else if (drv_data->ioaddr == SSP3_VIRT)
+               clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
+       else
+               return -ENODEV;
+       chip->speed_hz = spi->max_speed_hz;
+
+       chip->cr0 = clk_div
+                       | SSCR0_Motorola
+                       | SSCR0_DataSize(spi->bits_per_word & 0x0f)
+                       | SSCR0_SSE
+                       | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+       chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4)
+                       | (((spi->mode & SPI_CPOL) != 0) << 3);
+
+       /* NOTE:  PXA25x_SSP _could_ use external clocking ... */
+       if (drv_data->ssp_type != PXA25x_SSP)
+               dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
+                               spi->bits_per_word,
+                               (CLOCK_SPEED_HZ)
+                                       / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
+                               spi->mode & 0x3);
+       else
+               dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
+                               spi->bits_per_word,
+                               (CLOCK_SPEED_HZ/2)
+                                       / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
+                               spi->mode & 0x3);
+
+       if (spi->bits_per_word <= 8) {
+               chip->n_bytes = 1;
+               chip->dma_width = DCMD_WIDTH1;
+               chip->read = u8_reader;
+               chip->write = u8_writer;
+       } else if (spi->bits_per_word <= 16) {
+               chip->n_bytes = 2;
+               chip->dma_width = DCMD_WIDTH2;
+               chip->read = u16_reader;
+               chip->write = u16_writer;
+       } else if (spi->bits_per_word <= 32) {
+               chip->cr0 |= SSCR0_EDSS;
+               chip->n_bytes = 4;
+               chip->dma_width = DCMD_WIDTH4;
+               chip->read = u32_reader;
+               chip->write = u32_writer;
+       } else {
+               dev_err(&spi->dev, "invalid wordsize\n");
+               kfree(chip);
+               return -ENODEV;
+       }
+       chip->bits_per_word = spi->bits_per_word;
+
+       spi_set_ctldata(spi, chip);
+
+       return 0;
+}
+
+static void cleanup(const struct spi_device *spi)
+{
+       struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+       kfree(chip);
+}
+
+static int init_queue(struct driver_data *drv_data)
+{
+       INIT_LIST_HEAD(&drv_data->queue);
+       spin_lock_init(&drv_data->lock);
+
+       drv_data->run = QUEUE_STOPPED;
+       drv_data->busy = 0;
+
+       tasklet_init(&drv_data->pump_transfers,
+                       pump_transfers, (unsigned long)drv_data);
+
+       INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+       drv_data->workqueue = create_singlethread_workqueue(
+                                       drv_data->master->cdev.dev->bus_id);
+       if (drv_data->workqueue == NULL)
+               return -EBUSY;
+
+       return 0;
+}
+
+static int start_queue(struct driver_data *drv_data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&drv_data->lock, flags);
+
+       if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
+               spin_unlock_irqrestore(&drv_data->lock, flags);
+               return -EBUSY;
+       }
+
+       drv_data->run = QUEUE_RUNNING;
+       drv_data->cur_msg = NULL;
+       drv_data->cur_transfer = NULL;
+       drv_data->cur_chip = NULL;
+       spin_unlock_irqrestore(&drv_data->lock, flags);
+
+       queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+       return 0;
+}
+
+static int stop_queue(struct driver_data *drv_data)
+{
+       unsigned long flags;
+       unsigned limit = 500;
+       int status = 0;
+
+       spin_lock_irqsave(&drv_data->lock, flags);
+
+       /* This is a bit lame, but is optimized for the common execution path.
+        * A wait_queue on the drv_data->busy could be used, but then the common
+        * execution path (pump_messages) would be required to call wake_up or
+        * friends on every SPI message. Do this instead */
+       drv_data->run = QUEUE_STOPPED;
+       while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+               spin_unlock_irqrestore(&drv_data->lock, flags);
+               msleep(10);
+               spin_lock_irqsave(&drv_data->lock, flags);
+       }
+
+       if (!list_empty(&drv_data->queue) || drv_data->busy)
+               status = -EBUSY;
+
+       spin_unlock_irqrestore(&drv_data->lock, flags);
+
+       return status;
+}
+
+static int destroy_queue(struct driver_data *drv_data)
+{
+       int status;
+
+       status = stop_queue(drv_data);
+       if (status != 0)
+               return status;
+
+       destroy_workqueue(drv_data->workqueue);
+
+       return 0;
+}
+
+static int pxa2xx_spi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct pxa2xx_spi_master *platform_info;
+       struct spi_master *master;
+       struct driver_data *drv_data = 0;
+       struct resource *memory_resource;
+       int irq;
+       int status = 0;
+
+       platform_info = dev->platform_data;
+
+       if (platform_info->ssp_type == SSP_UNDEFINED) {
+               dev_err(&pdev->dev, "undefined SSP\n");
+               return -ENODEV;
+       }
+
+       /* Allocate master with space for drv_data and null dma buffer */
+       master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
+       if (!master) {
+               dev_err(&pdev->dev, "can not alloc spi_master\n");
+               return -ENOMEM;
+       }
+       drv_data = spi_master_get_devdata(master);
+       drv_data->master = master;
+       drv_data->master_info = platform_info;
+       drv_data->pdev = pdev;
+
+       master->bus_num = pdev->id;
+       master->num_chipselect = platform_info->num_chipselect;
+       master->cleanup = cleanup;
+       master->setup = setup;
+       master->transfer = transfer;
+
+       drv_data->ssp_type = platform_info->ssp_type;
+       drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data
+                                               + sizeof(struct driver_data), 8);
+
+       /* Setup register addresses */
+       memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!memory_resource) {
+               dev_err(&pdev->dev, "memory resources not defined\n");
+               status = -ENODEV;
+               goto out_error_master_alloc;
+       }
+
+       drv_data->ioaddr = (void *)io_p2v(memory_resource->start);
+       drv_data->ssdr_physical = memory_resource->start + 0x00000010;
+       if (platform_info->ssp_type == PXA25x_SSP) {
+               drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
+               drv_data->dma_cr1 = 0;
+               drv_data->clear_sr = SSSR_ROR;
+               drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
+       } else {
+               drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
+               drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
+               drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
+               drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
+       }
+
+       /* Attach to IRQ */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "irq resource not defined\n");
+               status = -ENODEV;
+               goto out_error_master_alloc;
+       }
+
+       status = request_irq(irq, ssp_int, SA_INTERRUPT, dev->bus_id, drv_data);
+       if (status < 0) {
+               dev_err(&pdev->dev, "can not get IRQ\n");
+               goto out_error_master_alloc;
+       }
+
+       /* Setup DMA if requested */
+       drv_data->tx_channel = -1;
+       drv_data->rx_channel = -1;
+       if (platform_info->enable_dma) {
+
+               /* Get two DMA channels (rx and tx) */
+               drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
+                                                       DMA_PRIO_HIGH,
+                                                       dma_handler,
+                                                       drv_data);
+               if (drv_data->rx_channel < 0) {
+                       dev_err(dev, "problem (%d) requesting rx channel\n",
+                               drv_data->rx_channel);
+                       status = -ENODEV;
+                       goto out_error_irq_alloc;
+               }
+               drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
+                                                       DMA_PRIO_MEDIUM,
+                                                       dma_handler,
+                                                       drv_data);
+               if (drv_data->tx_channel < 0) {
+                       dev_err(dev, "problem (%d) requesting tx channel\n",
+                               drv_data->tx_channel);
+                       status = -ENODEV;
+                       goto out_error_dma_alloc;
+               }
+
+               if (drv_data->ioaddr == SSP1_VIRT) {
+                               DRCMRRXSSDR = DRCMR_MAPVLD
+                                               | drv_data->rx_channel;
+                               DRCMRTXSSDR = DRCMR_MAPVLD
+                                               | drv_data->tx_channel;
+               } else if (drv_data->ioaddr == SSP2_VIRT) {
+                               DRCMRRXSS2DR = DRCMR_MAPVLD
+                                               | drv_data->rx_channel;
+                               DRCMRTXSS2DR = DRCMR_MAPVLD
+                                               | drv_data->tx_channel;
+               } else if (drv_data->ioaddr == SSP3_VIRT) {
+                               DRCMRRXSS3DR = DRCMR_MAPVLD
+                                               | drv_data->rx_channel;
+                               DRCMRTXSS3DR = DRCMR_MAPVLD
+                                               | drv_data->tx_channel;
+               } else {
+                       dev_err(dev, "bad SSP type\n");
+                       goto out_error_dma_alloc;
+               }
+       }
+
+       /* Enable SOC clock */
+       pxa_set_cken(platform_info->clock_enable, 1);
+
+       /* Load default SSP configuration */
+       write_SSCR0(0, drv_data->ioaddr);
+       write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
+       write_SSCR0(SSCR0_SerClkDiv(2)
+                       | SSCR0_Motorola
+                       | SSCR0_DataSize(8),
+                       drv_data->ioaddr);
+       if (drv_data->ssp_type != PXA25x_SSP)
+               write_SSTO(0, drv_data->ioaddr);
+       write_SSPSP(0, drv_data->ioaddr);
+
+       /* Initialize and start the queue */
+       status = init_queue(drv_data);
+       if (status != 0) {
+               dev_err(&pdev->dev, "problem initializing queue\n");
+               goto out_error_clock_enabled;
+       }
+       status = start_queue(drv_data);
+       if (status != 0) {
+               dev_err(&pdev->dev, "problem starting queue\n");
+               goto out_error_clock_enabled;
+       }
+
+       /* Register with the SPI framework */
+       platform_set_drvdata(pdev, drv_data);
+       status = spi_register_master(master);
+       if (status != 0) {
+               dev_err(&pdev->dev, "problem registering spi master\n");
+               goto out_error_queue_alloc;
+       }
+
+       return status;
+
+out_error_queue_alloc:
+       destroy_queue(drv_data);
+
+out_error_clock_enabled:
+       pxa_set_cken(platform_info->clock_enable, 0);
+
+out_error_dma_alloc:
+       if (drv_data->tx_channel != -1)
+               pxa_free_dma(drv_data->tx_channel);
+       if (drv_data->rx_channel != -1)
+               pxa_free_dma(drv_data->rx_channel);
+
+out_error_irq_alloc:
+       free_irq(irq, drv_data);
+
+out_error_master_alloc:
+       spi_master_put(master);
+       return status;
+}
+
+static int pxa2xx_spi_remove(struct platform_device *pdev)
+{
+       struct driver_data *drv_data = platform_get_drvdata(pdev);
+       int irq;
+       int status = 0;
+
+       if (!drv_data)
+               return 0;
+
+       /* Remove the queue */
+       status = destroy_queue(drv_data);
+       if (status != 0)
+               return status;
+
+       /* Disable the SSP at the peripheral and SOC level */
+       write_SSCR0(0, drv_data->ioaddr);
+       pxa_set_cken(drv_data->master_info->clock_enable, 0);
+
+       /* Release DMA */
+       if (drv_data->master_info->enable_dma) {
+               if (drv_data->ioaddr == SSP1_VIRT) {
+                       DRCMRRXSSDR = 0;
+                       DRCMRTXSSDR = 0;
+               } else if (drv_data->ioaddr == SSP2_VIRT) {
+                       DRCMRRXSS2DR = 0;
+                       DRCMRTXSS2DR = 0;
+               } else if (drv_data->ioaddr == SSP3_VIRT) {
+                       DRCMRRXSS3DR = 0;
+                       DRCMRTXSS3DR = 0;
+               }
+               pxa_free_dma(drv_data->tx_channel);
+               pxa_free_dma(drv_data->rx_channel);
+       }
+
+       /* Release IRQ */
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0)
+               free_irq(irq, drv_data);
+
+       /* Disconnect from the SPI framework */
+       spi_unregister_master(drv_data->master);
+
+       /* Prevent double remove */
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static void pxa2xx_spi_shutdown(struct platform_device *pdev)
+{
+       int status = 0;
+
+       if ((status = pxa2xx_spi_remove(pdev)) != 0)
+               dev_err(&pdev->dev, "shutdown failed with %d\n", status);
+}
+
+#ifdef CONFIG_PM
+static int suspend_devices(struct device *dev, void *pm_message)
+{
+       pm_message_t *state = pm_message;
+
+       if (dev->power.power_state.event != state->event) {
+               dev_warn(dev, "pm state does not match request\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct driver_data *drv_data = platform_get_drvdata(pdev);
+       int status = 0;
+
+       /* Check all children for current power state */
+       if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
+               dev_warn(&pdev->dev, "suspend aborted\n");
+               return -1;
+       }
+
+       status = stop_queue(drv_data);
+       if (status != 0)
+               return status;
+       write_SSCR0(0, drv_data->ioaddr);
+       pxa_set_cken(drv_data->master_info->clock_enable, 0);
+
+       return 0;
+}
+
+static int pxa2xx_spi_resume(struct platform_device *pdev)
+{
+       struct driver_data *drv_data = platform_get_drvdata(pdev);
+       int status = 0;
+
+       /* Enable the SSP clock */
+       pxa_set_cken(drv_data->master_info->clock_enable, 1);
+
+       /* Start the queue running */
+       status = start_queue(drv_data);
+       if (status != 0) {
+               dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
+               return status;
+       }
+
+       return 0;
+}
+#else
+#define pxa2xx_spi_suspend NULL
+#define pxa2xx_spi_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver driver = {
+       .driver = {
+               .name = "pxa2xx-spi",
+               .bus = &platform_bus_type,
+               .owner = THIS_MODULE,
+       },
+       .probe = pxa2xx_spi_probe,
+       .remove = __devexit_p(pxa2xx_spi_remove),
+       .shutdown = pxa2xx_spi_shutdown,
+       .suspend = pxa2xx_spi_suspend,
+       .resume = pxa2xx_spi_resume,
+};
+
+static int __init pxa2xx_spi_init(void)
+{
+       platform_driver_register(&driver);
+
+       return 0;
+}
+module_init(pxa2xx_spi_init);
+
+static void __exit pxa2xx_spi_exit(void)
+{
+       platform_driver_unregister(&driver);
+}
+module_exit(pxa2xx_spi_exit);
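
The probe above unwinds with stacked goto labels: each failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A minimal userspace sketch of the same pattern; grab()/release() are illustrative stand-ins, not kernel APIs:

#include <stdio.h>

static int grab(const char *what, int fail)
{
        if (fail) {
                fprintf(stderr, "failed to grab %s\n", what);
                return -1;
        }
        printf("grabbed %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("released %s\n", what);
}

/* Each failure jumps past its own cleanup, so only resources that
 * were actually acquired get released -- mirroring the out_error_*
 * ladder in pxa2xx_spi_probe(). */
static int probe(int fail_at)
{
        if (grab("irq", fail_at == 0))
                goto out;
        if (grab("dma", fail_at == 1))
                goto out_free_irq;
        if (grab("clock", fail_at == 2))
                goto out_free_dma;
        return 0;

out_free_dma:
        release("dma");
out_free_irq:
        release("irq");
out:
        return -1;
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                probe(i);
        return 0;
}
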
index 94f5e8ed83a7a8427105201aaa81f14ff5c710c7..7a3f733051e9ce8a2c6102fcf7ddd9add2d620b6 100644 (file)
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master);
 int __init_or_module
 spi_register_master(struct spi_master *master)
 {
-       static atomic_t         dyn_bus_id = ATOMIC_INIT(0);
+       static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<16) - 1);
        struct device           *dev = master->cdev.dev;
        int                     status = -ENODEV;
        int                     dynamic = 0;
@@ -404,7 +404,7 @@ spi_register_master(struct spi_master *master)
                return -ENODEV;
 
        /* convention:  dynamically assigned bus IDs count down from the max */
-       if (master->bus_num == 0) {
+       if (master->bus_num < 0) {
                master->bus_num = atomic_dec_return(&dyn_bus_id);
                dynamic = 1;
        }
@@ -522,7 +522,8 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
 }
 EXPORT_SYMBOL_GPL(spi_sync);
 
-#define        SPI_BUFSIZ      (SMP_CACHE_BYTES)
+/* portable code must never pass more than 32 bytes */
+#define        SPI_BUFSIZ      max(32,SMP_CACHE_BYTES)
 
 static u8      *buf;
 
index f037e5593269e8a79f700e7acf917919e0b13762..dd2f950b21a734ca753a48ddf55fdd31d1b573bc 100644 (file)
@@ -138,6 +138,45 @@ static unsigned bitbang_txrx_32(
        return t->len - count;
 }
 
+int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+       struct spi_bitbang_cs   *cs = spi->controller_state;
+       u8                      bits_per_word;
+       u32                     hz;
+
+       if (t) {
+               bits_per_word = t->bits_per_word;
+               hz = t->speed_hz;
+       } else {
+               bits_per_word = 0;
+               hz = 0;
+       }
+
+       /* spi_transfer level calls that work per-word */
+       if (!bits_per_word)
+               bits_per_word = spi->bits_per_word;
+       if (bits_per_word <= 8)
+               cs->txrx_bufs = bitbang_txrx_8;
+       else if (bits_per_word <= 16)
+               cs->txrx_bufs = bitbang_txrx_16;
+       else if (bits_per_word <= 32)
+               cs->txrx_bufs = bitbang_txrx_32;
+       else
+               return -EINVAL;
+
+       /* nsecs = (clock period)/2 */
+       if (!hz)
+               hz = spi->max_speed_hz;
+       if (hz) {
+               cs->nsecs = (1000000000/2) / hz;
+               if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
+
 /**
  * spi_bitbang_setup - default setup for per-word I/O loops
  */
@@ -145,8 +184,16 @@ int spi_bitbang_setup(struct spi_device *spi)
 {
        struct spi_bitbang_cs   *cs = spi->controller_state;
        struct spi_bitbang      *bitbang;
+       int                     retval;
 
-       if (!spi->max_speed_hz)
+       bitbang = spi_master_get_devdata(spi->master);
+
+       /* REVISIT: some systems will want to support devices using lsb-first
+        * bit encodings on the wire.  In pure software that would be
+        * trivial: just add bitbang_txrx_le_cphaX() routines shifting the
+        * other way.  Some hardware controllers also have this support.
+        */
+       if ((spi->mode & SPI_LSB_FIRST) != 0)
                return -EINVAL;
 
        if (!cs) {
@@ -155,32 +202,20 @@ int spi_bitbang_setup(struct spi_device *spi)
                        return -ENOMEM;
                spi->controller_state = cs;
        }
-       bitbang = spi_master_get_devdata(spi->master);
 
        if (!spi->bits_per_word)
                spi->bits_per_word = 8;
 
-       /* spi_transfer level calls that work per-word */
-       if (spi->bits_per_word <= 8)
-               cs->txrx_bufs = bitbang_txrx_8;
-       else if (spi->bits_per_word <= 16)
-               cs->txrx_bufs = bitbang_txrx_16;
-       else if (spi->bits_per_word <= 32)
-               cs->txrx_bufs = bitbang_txrx_32;
-       else
-               return -EINVAL;
-
        /* per-word shift register access, in hardware or bitbanging */
        cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
        if (!cs->txrx_word)
                return -EINVAL;
 
-       /* nsecs = (clock period)/2 */
-       cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
-       if (cs->nsecs > MAX_UDELAY_MS * 1000)
-               return -EINVAL;
+       retval = spi_bitbang_setup_transfer(spi, NULL);
+       if (retval < 0)
+               return retval;
 
-       dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
+       dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
                        __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
                        spi->bits_per_word, 2 * cs->nsecs);
 
@@ -246,6 +281,8 @@ static void bitbang_work(void *_bitbang)
                unsigned                tmp;
                unsigned                cs_change;
                int                     status;
+               int                     (*setup_transfer)(struct spi_device *,
+                                               struct spi_transfer *);
 
                m = container_of(bitbang->queue.next, struct spi_message,
                                queue);
@@ -262,6 +299,7 @@ static void bitbang_work(void *_bitbang)
                tmp = 0;
                cs_change = 1;
                status = 0;
+               setup_transfer = NULL;
 
                list_for_each_entry (t, &m->transfers, transfer_list) {
                        if (bitbang->shutdown) {
@@ -269,6 +307,20 @@ static void bitbang_work(void *_bitbang)
                                break;
                        }
 
+                       /* override or restore speed and wordsize */
+                       if (t->speed_hz || t->bits_per_word) {
+                               setup_transfer = bitbang->setup_transfer;
+                               if (!setup_transfer) {
+                                       status = -ENOPROTOOPT;
+                                       break;
+                               }
+                       }
+                       if (setup_transfer) {
+                               status = setup_transfer(spi, t);
+                               if (status < 0)
+                                       break;
+                       }
+
                        /* set up default clock polarity, and activate chip;
                         * this implicitly updates clock and spi modes as
                         * previously recorded for this device via setup().
@@ -325,6 +377,10 @@ static void bitbang_work(void *_bitbang)
                m->status = status;
                m->complete(m->context);
 
+               /* restore speed and wordsize */
+               if (setup_transfer)
+                       setup_transfer(spi, NULL);
+
                /* normally deactivate chipselect ... unless no error and
                 * cs_change has hinted that the next message will probably
                 * be for this chip too.
@@ -348,6 +404,7 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
 {
        struct spi_bitbang      *bitbang;
        unsigned long           flags;
+       int                     status = 0;
 
        m->actual_length = 0;
        m->status = -EINPROGRESS;
@@ -357,11 +414,15 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
                return -ESHUTDOWN;
 
        spin_lock_irqsave(&bitbang->lock, flags);
-       list_add_tail(&m->queue, &bitbang->queue);
-       queue_work(bitbang->workqueue, &bitbang->work);
+       if (!spi->max_speed_hz)
+               status = -ENETDOWN;
+       else {
+               list_add_tail(&m->queue, &bitbang->queue);
+               queue_work(bitbang->workqueue, &bitbang->work);
+       }
        spin_unlock_irqrestore(&bitbang->lock, flags);
 
-       return 0;
+       return status;
 }
 EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
 
@@ -406,6 +467,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
                bitbang->use_dma = 0;
                bitbang->txrx_bufs = spi_bitbang_bufs;
                if (!bitbang->master->setup) {
+                       if (!bitbang->setup_transfer)
+                               bitbang->setup_transfer =
+                                        spi_bitbang_setup_transfer;
                        bitbang->master->setup = spi_bitbang_setup;
                        bitbang->master->cleanup = spi_bitbang_cleanup;
                }
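
The new spi_bitbang_setup_transfer() lets an individual spi_transfer override the device's clock rate and word size: bitbang_work() applies the override before any transfer that asks for one, and once the whole message completes it calls setup_transfer(spi, NULL) to restore the per-device defaults. A small model of that override-then-restore flow; the struct and helper here are illustrative, not the kernel types:

#include <stdio.h>

struct dev_cfg { unsigned speed_hz; unsigned bits; };

/* A zero field in the transfer means "use the device default",
 * and a NULL transfer restores the defaults entirely. */
static void setup_transfer(struct dev_cfg *live,
                           const struct dev_cfg *dflt,
                           const struct dev_cfg *t)
{
        live->speed_hz = (t && t->speed_hz) ? t->speed_hz : dflt->speed_hz;
        live->bits     = (t && t->bits)     ? t->bits     : dflt->bits;
}

int main(void)
{
        struct dev_cfg dflt = { 1000000, 8 }, live = dflt;
        struct dev_cfg xfer = { 0, 16 };     /* override word size only */

        setup_transfer(&live, &dflt, &xfer);
        printf("during: %u Hz, %u bits\n", live.speed_hz, live.bits);

        setup_transfer(&live, &dflt, NULL);  /* restore, as bitbang_work does */
        printf("after:  %u Hz, %u bits\n", live.speed_hz, live.bits);
        return 0;
}
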
index 334b1db1bd7cc42f52e3a5c3353dd28b7b1c032d..27597c576eff3f4cb0a116dee1d682f8ce4f9383 100644 (file)
@@ -29,12 +29,15 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
 
 static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
 {
-       int rc = -ENXIO, power;
+       int rc = -ENXIO;
        char *endp;
        struct backlight_device *bd = to_backlight_device(cdev);
+       int power = simple_strtoul(buf, &endp, 0);
+       size_t size = endp - buf;
 
-       power = simple_strtoul(buf, &endp, 0);
-       if (*endp && !isspace(*endp))
+       if (*endp && isspace(*endp))
+               size++;
+       if (size != count)
                return -EINVAL;
 
        down(&bd->sem);
@@ -65,12 +68,15 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
 
 static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
 {
-       int rc = -ENXIO, brightness;
+       int rc = -ENXIO;
        char *endp;
        struct backlight_device *bd = to_backlight_device(cdev);
+       int brightness = simple_strtoul(buf, &endp, 0);
+       size_t size = endp - buf;
 
-       brightness = simple_strtoul(buf, &endp, 0);
-       if (*endp && !isspace(*endp))
+       if (*endp && isspace(*endp))
+               size++;
+       if (size != count)
                return -EINVAL;
 
        down(&bd->sem);
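
The old parser accepted any string whose first non-numeric character was whitespace, so input like "5 garbage" slipped through with value 5. The new logic requires the parsed number, plus at most one trailing whitespace character (typically the newline from echo), to account for the entire write. A userspace model of the check, with -1 standing in for -EINVAL:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_exact(const char *buf, size_t count, unsigned long *out)
{
        char *endp;
        unsigned long v = strtoul(buf, &endp, 0);
        size_t size = endp - buf;

        /* allow exactly one trailing whitespace character */
        if (*endp && isspace((unsigned char)*endp))
                size++;
        if (size != count)
                return -1;              /* stands in for -EINVAL */
        *out = v;
        return 0;
}

int main(void)
{
        unsigned long v;

        printf("\"5\\n\" -> %d\n", parse_exact("5\n", 2, &v)); /* accepted */
        printf("\"5x\"  -> %d\n", parse_exact("5x", 2, &v));  /* rejected */
        printf("\"5 6\" -> %d\n", parse_exact("5 6", 3, &v)); /* rejected */
        return 0;
}
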
index 86908a60c630d9624fab519c24d1ab0f28091e3b..bc8ab005a3fb174a9eecc050a76301469ed2916f 100644 (file)
@@ -31,12 +31,15 @@ static ssize_t lcd_show_power(struct class_device *cdev, char *buf)
 
 static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count)
 {
-       int rc, power;
+       int rc = -ENXIO;
        char *endp;
        struct lcd_device *ld = to_lcd_device(cdev);
+       int power = simple_strtoul(buf, &endp, 0);
+       size_t size = endp - buf;
 
-       power = simple_strtoul(buf, &endp, 0);
-       if (*endp && !isspace(*endp))
+       if (*endp && isspace(*endp))
+               size++;
+       if (size != count)
                return -EINVAL;
 
        down(&ld->sem);
@@ -44,8 +47,7 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
                pr_debug("lcd: set power to %d\n", power);
                ld->props->set_power(ld, power);
                rc = count;
-       } else
-               rc = -ENXIO;
+       }
        up(&ld->sem);
 
        return rc;
@@ -53,14 +55,12 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
 
 static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
 {
-       int rc;
+       int rc = -ENXIO;
        struct lcd_device *ld = to_lcd_device(cdev);
 
        down(&ld->sem);
        if (likely(ld->props && ld->props->get_contrast))
                rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld));
-       else
-               rc = -ENXIO;
        up(&ld->sem);
 
        return rc;
@@ -68,12 +68,15 @@ static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
 
 static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count)
 {
-       int rc, contrast;
+       int rc = -ENXIO;
        char *endp;
        struct lcd_device *ld = to_lcd_device(cdev);
+       int contrast = simple_strtoul(buf, &endp, 0);
+       size_t size = endp - buf;
 
-       contrast = simple_strtoul(buf, &endp, 0);
-       if (*endp && !isspace(*endp))
+       if (*endp && isspace(*endp))
+               size++;
+       if (size != count)
                return -EINVAL;
 
        down(&ld->sem);
@@ -81,8 +84,7 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
                pr_debug("lcd: set contrast to %d\n", contrast);
                ld->props->set_contrast(ld, contrast);
                rc = count;
-       } else
-               rc = -ENXIO;
+       }
        up(&ld->sem);
 
        return rc;
@@ -90,14 +92,12 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
 
 static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf)
 {
-       int rc;
+       int rc = -ENXIO;
        struct lcd_device *ld = to_lcd_device(cdev);
 
        down(&ld->sem);
        if (likely(ld->props))
                rc = sprintf(buf, "%d\n", ld->props->max_contrast);
-       else
-               rc = -ENXIO;
        up(&ld->sem);
 
        return rc;
index 71742ba150c44f12a4abb3b4ce45c15fcc15e775..6f2617820a4ef3ad001bad832978ea55ad379774 100644 (file)
@@ -98,23 +98,20 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
 static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
        struct v9fs_fcall *rc, int err)
 {
-       int fid;
+       int fid, id;
        struct v9fs_session_info *v9ses;
 
-       if (err)
-               return;
-
+       id = 0;
        fid = tc->params.tclunk.fid;
-       kfree(tc);
-
-       if (!rc)
-               return;
-
-       v9ses = a;
-       if (rc->id == RCLUNK)
-               v9fs_put_idpool(fid, &v9ses->fidpool);
+       if (rc)
+               id = rc->id;
 
+       kfree(tc);
        kfree(rc);
+       if (id == RCLUNK) {
+               v9ses = a;
+               v9fs_put_idpool(fid, &v9ses->fidpool);
+       }
 }
 
 /**
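
The reworked clunk callback copies the fid and the response id into locals first, then frees both fcall frames on every path; the old version returned early on errors and leaked the frames. A compact sketch of that capture-then-free shape; struct fcall here is reduced to the two fields the sketch needs:

#include <stdio.h>
#include <stdlib.h>

#define RCLUNK 121      /* 9P2000 Rclunk message id */

struct fcall { int id; int fid; };

static void clunk_cb(struct fcall *tc, struct fcall *rc)
{
        int fid = tc->fid;
        int id = rc ? rc->id : 0;

        free(tc);               /* frames are gone on every path... */
        free(rc);
        if (id == RCLUNK)       /* ...so only the locals are used here */
                printf("fid %d returned to pool\n", fid);
}

int main(void)
{
        struct fcall *tc = calloc(1, sizeof(*tc));
        struct fcall *rc = calloc(1, sizeof(*rc));

        if (!tc || !rc)
                return 1;
        tc->fid = 7;
        rc->id = RCLUNK;
        clunk_cb(tc, rc);       /* callback takes ownership of both */
        return 0;
}
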
index 3e5b124a72123d4c50a99c44351d87a0a21d5388..f4407eb276c7cfa7d350061792b08e0393d4ae3b 100644 (file)
@@ -50,15 +50,23 @@ enum {
        Wpending = 8,           /* can write */
 };
 
+enum {
+       None,
+       Flushing,
+       Flushed,
+};
+
 struct v9fs_mux_poll_task;
 
 struct v9fs_req {
+       spinlock_t lock;
        int tag;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        int err;
        v9fs_mux_req_callback cb;
        void *cba;
+       int flush;
        struct list_head req_list;
 };
 
@@ -96,8 +104,8 @@ struct v9fs_mux_poll_task {
 
 struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
-       struct v9fs_req *req;
        int err;
+       struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
 };
@@ -524,10 +532,9 @@ again:
 
 static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
-       int ecode, tag;
+       int ecode;
        struct v9fs_str *ename;
 
-       tag = req->tag;
        if (!req->err && req->rcall->id == RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;
@@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
                if (!req->err)
                        req->err = -EIO;
        }
-
-       if (req->err == ERREQFLUSH)
-               return;
-
-       if (req->cb) {
-               dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
-                       req->tcall, req->rcall);
-
-               (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
-               req->cb = NULL;
-       } else
-               kfree(req->rcall);
-
-       v9fs_mux_put_tag(m, tag);
-
-       wake_up(&m->equeue);
-       kfree(req);
 }
 
 /**
@@ -669,17 +659,26 @@ static void v9fs_read_work(void *a)
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
-                               req->rcall = rcall;
-                               list_del(&req->req_list);
-                               spin_unlock(&m->lock);
-                               process_request(m, req);
+                               if (req->flush != Flushing)
+                                       list_del(&req->req_list);
                                break;
                        }
-
                }
+               spin_unlock(&m->lock);
 
-               if (!req) {
-                       spin_unlock(&m->lock);
+               if (req) {
+                       req->rcall = rcall;
+                       process_request(m, req);
+
+                       if (req->flush != Flushing) {
+                               if (req->cb)
+                                       (*req->cb) (req, req->cba);
+                               else
+                                       kfree(req->rcall);
+
+                               wake_up(&m->equeue);
+                       }
+               } else {
                        if (err >= 0 && rcall->id != RFLUSH)
                                dprintk(DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
@@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                return ERR_PTR(-ENOMEM);
 
        v9fs_set_tag(tc, n);
-
        if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
                char buf[150];
 
@@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                printk(KERN_NOTICE "<<< %p %s\n", m, buf);
        }
 
+       spin_lock_init(&req->lock);
        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;
+       req->flush = None;
 
        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
@@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
        return req;
 }
 
-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
-                             struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+       v9fs_mux_put_tag(m, req->tag);
+       kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
 {
        v9fs_mux_req_callback cb;
        int tag;
        struct v9fs_mux_data *m;
-       struct v9fs_req *req, *rptr;
+       struct v9fs_req *req, *rreq, *rptr;
 
        m = a;
-       dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
-               rc, err, tc->params.tflush.oldtag);
+       dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+               freq->tcall, freq->rcall, freq->err,
+               freq->tcall->params.tflush.oldtag);
 
        spin_lock(&m->lock);
        cb = NULL;
-       tag = tc->params.tflush.oldtag;
-       list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
-               if (req->tag == tag) {
+       tag = freq->tcall->params.tflush.oldtag;
+       req = NULL;
+       list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+               if (rreq->tag == tag) {
+                       req = rreq;
                        list_del(&req->req_list);
-                       if (req->cb) {
-                               cb = req->cb;
-                               req->cb = NULL;
-                               spin_unlock(&m->lock);
-                               (*cb) (req->cba, req->tcall, req->rcall,
-                                      req->err);
-                       }
-                       kfree(req);
-                       wake_up(&m->equeue);
                        break;
                }
        }
+       spin_unlock(&m->lock);
 
-       if (!cb)
-               spin_unlock(&m->lock);
+       if (req) {
+               spin_lock(&req->lock);
+               req->flush = Flushed;
+               spin_unlock(&req->lock);
+
+               if (req->cb)
+                       (*req->cb) (req, req->cba);
+               else
+                       kfree(req->rcall);
+
+               wake_up(&m->equeue);
+       }
 
-       v9fs_mux_put_tag(m, tag);
-       kfree(tc);
-       kfree(rc);
+       kfree(freq->tcall);
+       kfree(freq->rcall);
+       v9fs_mux_free_request(m, freq);
 }
 
-static void
+static int
 v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
        struct v9fs_fcall *fc;
+       struct v9fs_req *rreq, *rptr;
 
        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
 
+       /* if a response was received for a request, do nothing */
+       spin_lock(&req->lock);
+       if (req->rcall || req->err) {
+               spin_unlock(&req->lock);
+               dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+               return 0;
+       }
+
+       req->flush = Flushing;
+       spin_unlock(&req->lock);
+
+       spin_lock(&m->lock);
+       /* if the request is not sent yet, just remove it from the list */
+       list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+               if (rreq->tag == req->tag) {
+                       dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+                       list_del(&rreq->req_list);
+                       req->flush = Flushed;
+                       spin_unlock(&m->lock);
+                       if (req->cb)
+                               (*req->cb) (req, req->cba);
+                       return 0;
+               }
+       }
+       spin_unlock(&m->lock);
+
+       clear_thread_flag(TIF_SIGPENDING);
        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+       return 1;
 }
 
 static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
 {
        struct v9fs_mux_rpc *r;
 
-       if (err == ERREQFLUSH) {
-               kfree(rc);
-               dprintk(DEBUG_MUX, "err req flush\n");
-               return;
-       }
-
+       dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
        r = a;
-       dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
-               tc, rc, err);
-       r->rcall = rc;
-       r->err = err;
+       r->rcall = req->rcall;
+       r->err = req->err;
+
+       if (req->flush!=None && !req->err)
+               r->err = -ERESTARTSYS;
+
        wake_up(&r->wqueue);
 }
 
@@ -856,12 +892,13 @@ int
 v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
 {
-       int err;
+       int err, sigpending;
        unsigned long flags;
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;
 
        r.err = 0;
+       r.tcall = tc;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);
@@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
        if (rc)
                *rc = NULL;
 
+       sigpending = 0;
+       if (signal_pending(current)) {
+               sigpending = 1;
+               clear_thread_flag(TIF_SIGPENDING);
+       }
+
        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
-               return PTR_ERR(req);
+               return err;
        }
 
-       r.req = req;
-       dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
-               req->tag, &r, req);
        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;
 
        if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
-               spin_lock(&m->lock);
-               req->tcall = NULL;
-               req->err = ERREQFLUSH;
-               spin_unlock(&m->lock);
+               if (v9fs_mux_flush_request(m, req)) {
+                       /* wait until we get response of the flush message */
+                       do {
+                               clear_thread_flag(TIF_SIGPENDING);
+                               err = wait_event_interruptible(r.wqueue,
+                                       r.rcall || r.err);
+                       } while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+                               m->trans->status==Connected && !m->err);
+               }
+               sigpending = 1;
+       }
 
-               clear_thread_flag(TIF_SIGPENDING);
-               v9fs_mux_flush_request(m, req);
+       if (sigpending) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }
 
-       if (!err) {
-               if (r.rcall)
-                       dprintk(DEBUG_MUX, "got response id %d tag %d\n",
-                               r.rcall->id, r.rcall->tag);
-
-               if (rc)
-                       *rc = r.rcall;
-               else
-                       kfree(r.rcall);
-       } else {
+       if (rc)
+               *rc = r.rcall;
+       else
                kfree(r.rcall);
-               dprintk(DEBUG_MUX, "got error %d\n", err);
-               if (err > 0)
-                       err = -EIO;
-       }
+
+       v9fs_mux_free_request(m, req);
+       if (err > 0)
+               err = -EIO;
 
        return err;
 }
@@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);
 
-       dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+       dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
+       list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+               list_move(&req->req_list, &cancel_list);
+       }
        spin_unlock(&m->lock);
 
        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
                        req->err = err;
 
                if (req->cb)
-                       (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+                       (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);
-
-               kfree(req);
        }
 
        wake_up(&m->equeue);
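
The flush rework replaces the old fire-and-forget Tflush with a small per-request state machine (None, Flushing, Flushed): a request that already has a response is left alone, one still sitting on the unsent list is dequeued locally, and only a request actually on the wire earns a real Tflush whose Rflush must then be awaited. The decision logic, reduced to userspace; the field names are simplified stand-ins for the real structures:

#include <stdio.h>

enum { None, Flushing, Flushed };

struct req {
        int flush;
        int have_response;   /* rcall || err in the real code */
        int sent;            /* 0: still on unsent_req_list */
};

/* Returns 1 if a Tflush must actually go on the wire. */
static int flush_request(struct req *r)
{
        if (r->have_response)
                return 0;           /* response raced in: nothing to do */

        r->flush = Flushing;
        if (!r->sent) {
                r->flush = Flushed; /* dequeue locally, no Tflush needed */
                return 0;
        }
        return 1;                   /* send Tflush, wait for Rflush */
}

int main(void)
{
        struct req a = { None, 1, 1 }, b = { None, 0, 0 }, c = { None, 0, 1 };

        printf("%d %d %d\n", flush_request(&a), flush_request(&b),
               flush_request(&c));  /* prints: 0 0 1 */
        return 0;
}
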
index e90bfd32ea42335ebd411e0d82423b34a38e5d0d..fb10c50186a11b6bb23323d845f918d535936a3c 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 struct v9fs_mux_data;
+struct v9fs_req;
 
 /**
  * v9fs_mux_req_callback - callback function that is called when the
@@ -36,8 +37,7 @@ struct v9fs_mux_data;
  * @rc - response call
  * @err - error code (non-zero if error occurred)
  */
-typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
-       struct v9fs_fcall *rc, int err);
+typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a);
 
 int v9fs_mux_global_init(void);
 void v9fs_mux_global_exit(void);
index 083dcfcd158e1cd3df4e445fdecabdd3c286f8eb..1a8e46084f0e1b25f14dc12417429dd79e94529f 100644 (file)
@@ -72,11 +72,17 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                return -ENOSPC;
        }
 
-       err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL);
+       err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall);
        if (err < 0) {
                dprintk(DEBUG_ERROR, "rewalk didn't work\n");
-               goto put_fid;
+               if (fcall && fcall->id == RWALK)
+                       goto clunk_fid;
+               else {
+                       v9fs_put_idpool(fid, &v9ses->fidpool);
+                       goto free_fcall;
+               }
        }
+       kfree(fcall);
 
        /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
        /* translate open mode appropriately */
@@ -109,8 +115,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 clunk_fid:
        v9fs_t_clunk(v9ses, fid);
 
-put_fid:
-       v9fs_put_idpool(fid, &v9ses->fidpool);
+free_fcall:
        kfree(fcall);
 
        return err;
index 133db366d30659aad05be2c18a383e5722eb1f71..2cb87ba4b1c1fa76ab685d213ae6e0f33a53dcf8 100644 (file)
@@ -270,7 +270,10 @@ v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
        err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
        if (err < 0) {
                PRINT_FCALL_ERROR("clone error", fcall);
-               goto put_fid;
+               if (fcall && fcall->id == RWALK)
+                       goto clunk_fid;
+               else
+                       goto put_fid;
        }
        kfree(fcall);
 
@@ -322,6 +325,9 @@ v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
                &fcall);
 
        if (err < 0) {
+               if (fcall && fcall->id == RWALK)
+                       goto clunk_fid;
+
                PRINT_FCALL_ERROR("walk error", fcall);
                v9fs_put_idpool(nfid, &v9ses->fidpool);
                goto error;
@@ -640,19 +646,26 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        }
 
        result = v9fs_t_walk(v9ses, dirfidnum, newfid,
-               (char *)dentry->d_name.name, NULL);
+               (char *)dentry->d_name.name, &fcall);
+
        if (result < 0) {
-               v9fs_put_idpool(newfid, &v9ses->fidpool);
+               if (fcall && fcall->id == RWALK)
+                       v9fs_t_clunk(v9ses, newfid);
+               else
+                       v9fs_put_idpool(newfid, &v9ses->fidpool);
+
                if (result == -ENOENT) {
                        d_add(dentry, NULL);
                        dprintk(DEBUG_VFS,
                                "Return negative dentry %p count %d\n",
                                dentry, atomic_read(&dentry->d_count));
+                       kfree(fcall);
                        return NULL;
                }
                dprintk(DEBUG_ERROR, "walk error:%d\n", result);
                goto FreeFcall;
        }
+       kfree(fcall);
 
        result = v9fs_t_stat(v9ses, newfid, &fcall);
        if (result < 0) {
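
All three call sites touched above (file open, create, lookup) now apply the same rule when v9fs_t_walk() fails: if the server actually answered with RWALK, the new fid exists remotely and must be clunked; if the walk never produced an Rwalk, the number only has to go back to the local id pool. The rule in isolation; clunk_fid()/put_idpool() stand in for v9fs_t_clunk()/v9fs_put_idpool():

#include <stdio.h>

#define RWALK 111       /* 9P2000 Rwalk message id */

struct fcall { int id; };

static void clunk_fid(int fid)  { printf("Tclunk fid %d\n", fid); }
static void put_idpool(int fid) { printf("fid %d -> local pool\n", fid); }

static void walk_failed(int fid, const struct fcall *rc)
{
        if (rc && rc->id == RWALK)
                clunk_fid(fid);   /* server holds the fid: clunk it */
        else
                put_idpool(fid);  /* never instantiated: recycle locally */
}

int main(void)
{
        struct fcall partial = { RWALK };

        walk_failed(3, &partial); /* walk partially succeeded */
        walk_failed(4, NULL);     /* walk never reached the server */
        return 0;
}
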
index 83bf478e786b3f64f1d5a90826f5d92622077950..078d3d1191a511f3ad88ac0167142fda8c10c5a9 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_DNOTIFY)         += dnotify.o
 obj-$(CONFIG_PROC_FS)          += proc/
 obj-y                          += partitions/
 obj-$(CONFIG_SYSFS)            += sysfs/
+obj-$(CONFIG_CONFIGFS_FS)      += configfs/
 obj-y                          += devpts/
 
 obj-$(CONFIG_PROFILING)                += dcookies.o
@@ -100,5 +101,4 @@ obj-$(CONFIG_BEFS_FS)               += befs/
 obj-$(CONFIG_HOSTFS)           += hostfs/
 obj-$(CONFIG_HPPFS)            += hppfs/
 obj-$(CONFIG_DEBUG_FS)         += debugfs/
-obj-$(CONFIG_CONFIGFS_FS)      += configfs/
 obj-$(CONFIG_OCFS2_FS)         += ocfs2/
index 57c4903614e58c29a2eae1462f1f46c1ca8083ec..d6603d02304ceed0d0a1c0e6c80e497bcaf58394 100644 (file)
@@ -74,8 +74,8 @@ struct autofs_wait_queue {
        struct autofs_wait_queue *next;
        autofs_wqt_t wait_queue_token;
        /* We use the following to see what we are waiting for */
-       int hash;
-       int len;
+       unsigned int hash;
+       unsigned int len;
        char *name;
        u32 dev;
        u64 ino;
@@ -85,7 +85,6 @@ struct autofs_wait_queue {
        pid_t tgid;
        /* This is for status reporting upon return */
        int status;
-       atomic_t notify;
        atomic_t wait_ctr;
 };
 
index 84e030c8ddd0f99047473308afc4d26f555e2c14..5100f984783faa123caabf76ad97e1718bdea70e 100644 (file)
@@ -327,6 +327,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+       struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int oz_mode = autofs4_oz_mode(sbi);
        unsigned int lookup_type;
        int status;
@@ -340,13 +341,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
        if (oz_mode || !lookup_type)
                goto done;
 
-       /*
-        * If a request is pending wait for it.
-        * If it's a mount then it won't be expired till at least
-        * a liitle later and if it's an expire then we might need
-        * to mount it again.
-        */
-       if (autofs4_ispending(dentry)) {
+       /* If an expire request is pending wait for it. */
+       if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
                DPRINTK("waiting for active request %p name=%.*s",
                        dentry, dentry->d_name.len, dentry->d_name.name);
 
index 142ab6aa2aa1516e6480d24c0ce3830b19852107..ce103e7b0bc360f684a2556c55ea9ed8be0e38f2 100644 (file)
@@ -189,14 +189,30 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
        return len;
 }
 
+static struct autofs_wait_queue *
+autofs4_find_wait(struct autofs_sb_info *sbi,
+                 char *name, unsigned int hash, unsigned int len)
+{
+       struct autofs_wait_queue *wq;
+
+       for (wq = sbi->queues; wq; wq = wq->next) {
+               if (wq->hash == hash &&
+                   wq->len == len &&
+                   wq->name && !memcmp(wq->name, name, len))
+                       break;
+       }
+       return wq;
+}
+
 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
                enum autofs_notify notify)
 {
+       struct autofs_info *ino;
        struct autofs_wait_queue *wq;
        char *name;
        unsigned int len = 0;
        unsigned int hash = 0;
-       int status;
+       int status, type;
 
        /* In catatonic mode, we don't wait for nobody */
        if (sbi->catatonic)
@@ -223,21 +239,41 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
                return -EINTR;
        }
 
-       for (wq = sbi->queues ; wq ; wq = wq->next) {
-               if (wq->hash == dentry->d_name.hash &&
-                   wq->len == len &&
-                   wq->name && !memcmp(wq->name, name, len))
-                       break;
-       }
+       wq = autofs4_find_wait(sbi, name, hash, len);
+       ino = autofs4_dentry_ino(dentry);
+       if (!wq && ino && notify == NFY_NONE) {
+               /*
+                * Either we've beaten the pending expire to post its
+                * wait or it finished while we waited on the mutex,
+                * so we need to wait until either the wait appears
+                * or the expire finishes.
+                */
+
+               while (ino->flags & AUTOFS_INF_EXPIRING) {
+                       mutex_unlock(&sbi->wq_mutex);
+                       schedule_timeout_interruptible(HZ/10);
+                       if (mutex_lock_interruptible(&sbi->wq_mutex)) {
+                               kfree(name);
+                               return -EINTR;
+                       }
+                       wq = autofs4_find_wait(sbi, name, hash, len);
+                       if (wq)
+                               break;
+               }
 
-       if (!wq) {
-               /* Can't wait for an expire if there's no mount */
-               if (notify == NFY_NONE && !d_mountpoint(dentry)) {
+               /*
+                * Not ideal but the status has already gone. Of the two
+                * cases where we wait on NFY_NONE, neither depends on the
+                * return status of the wait.
+                */
+               if (!wq) {
                        kfree(name);
                        mutex_unlock(&sbi->wq_mutex);
-                       return -ENOENT;
+                       return 0;
                }
+       }
 
+       if (!wq) {
                /* Create a new wait queue */
                wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
                if (!wq) {
@@ -263,20 +299,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
                wq->tgid = current->tgid;
                wq->status = -EINTR; /* Status return if interrupted */
                atomic_set(&wq->wait_ctr, 2);
-               atomic_set(&wq->notify, 1);
-               mutex_unlock(&sbi->wq_mutex);
-       } else {
-               atomic_inc(&wq->wait_ctr);
                mutex_unlock(&sbi->wq_mutex);
-               kfree(name);
-               DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
-                       (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
-       }
-
-       if (notify != NFY_NONE && atomic_read(&wq->notify)) {
-               int type;
-
-               atomic_dec(&wq->notify);
 
                if (sbi->version < 5) {
                        if (notify == NFY_MOUNT)
@@ -299,6 +322,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 
                /* autofs4_notify_daemon() may block */
                autofs4_notify_daemon(sbi, wq, type);
+       } else {
+               atomic_inc(&wq->wait_ctr);
+               mutex_unlock(&sbi->wq_mutex);
+               kfree(name);
+               DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+                       (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
        }
 
        /* wq->name is NULL if and only if the lock is already released */
index 970888aad84383d6d708d347593e461165b4d2a1..01f39f87f372ec08bfd932dd92a3dc5e646d20d6 100644 (file)
@@ -1913,7 +1913,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
        }
 
        if (sigmask) {
-               if (sigsetsize |= sizeof(compat_sigset_t))
+               if (sigsetsize != sizeof(compat_sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
                        return -EFAULT;
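
The compat_sys_ppoll fix above is a one-character bug: "|=" assigns rather than compares, so the condition tested the ORed value (always non-zero here) and every caller, including those passing the correct size, got -EINVAL while sigsetsize was corrupted as a side effect. Demonstrated in isolation, with 8 standing in for sizeof(compat_sigset_t):

#include <stdio.h>

int main(void)
{
        unsigned long sigsetsize = 8;   /* caller passed the right size */

        if (sigsetsize |= 8)            /* bug: OR-assign, always true */
                printf("wrongly rejected, sigsetsize now %lu\n",
                       sigsetsize);

        sigsetsize = 8;
        if (sigsetsize != 8)            /* fixed comparison */
                printf("rejected\n");
        else
                printf("accepted\n");
        return 0;
}
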
index 5638c8f9362f9222419e1cff3f80e50dca646abc..5f952187fc53575795358785cdb586bd7093b8c9 100644 (file)
@@ -505,13 +505,15 @@ static int populate_groups(struct config_group *group)
        int i;
 
        if (group->default_groups) {
-               /* FYI, we're faking mkdir here
+               /*
+                * FYI, we're faking mkdir here
                 * I'm not sure we need this semaphore, as we're called
                 * from our parent's mkdir.  That holds our parent's
                 * i_mutex, so afaik lookup cannot continue through our
                 * parent to find us, let alone mess with our tree.
                 * That said, taking our i_mutex is closer to mkdir
-                * emulation, and shouldn't hurt. */
+                * emulation, and shouldn't hurt.
+                */
                mutex_lock(&dentry->d_inode->i_mutex);
 
                for (i = 0; group->default_groups[i]; i++) {
@@ -546,20 +548,34 @@ static void unlink_obj(struct config_item *item)
 
                item->ci_group = NULL;
                item->ci_parent = NULL;
+
+               /* Drop the reference for ci_entry */
                config_item_put(item);
 
+               /* Drop the reference for ci_parent */
                config_group_put(group);
        }
 }
 
 static void link_obj(struct config_item *parent_item, struct config_item *item)
 {
-       /* Parent seems redundant with group, but it makes certain
-        * traversals much nicer. */
+       /*
+        * Parent seems redundant with group, but it makes certain
+        * traversals much nicer.
+        */
        item->ci_parent = parent_item;
+
+       /*
+        * We hold a reference on the parent for the child's ci_parent
+        * link.
+        */
        item->ci_group = config_group_get(to_config_group(parent_item));
        list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
 
+       /*
+        * We hold a reference on the child for ci_entry on the parent's
+        * cg_children
+        */
        config_item_get(item);
 }
 
@@ -684,6 +700,10 @@ static void client_drop_item(struct config_item *parent_item,
        type = parent_item->ci_type;
        BUG_ON(!type);
 
+       /*
+        * If ->drop_item() exists, it is responsible for the
+        * config_item_put().
+        */
        if (type->ct_group_ops && type->ct_group_ops->drop_item)
                type->ct_group_ops->drop_item(to_config_group(parent_item),
                                                item);
@@ -694,23 +714,28 @@ static void client_drop_item(struct config_item *parent_item,
 
 static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-       int ret;
+       int ret, module_got = 0;
        struct config_group *group;
        struct config_item *item;
        struct config_item *parent_item;
        struct configfs_subsystem *subsys;
        struct configfs_dirent *sd;
        struct config_item_type *type;
-       struct module *owner;
+       struct module *owner = NULL;
        char *name;
 
-       if (dentry->d_parent == configfs_sb->s_root)
-               return -EPERM;
+       if (dentry->d_parent == configfs_sb->s_root) {
+               ret = -EPERM;
+               goto out;
+       }
 
        sd = dentry->d_parent->d_fsdata;
-       if (!(sd->s_type & CONFIGFS_USET_DIR))
-               return -EPERM;
+       if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+               ret = -EPERM;
+               goto out;
+       }
 
+       /* Get a working ref for the duration of this function */
        parent_item = configfs_get_config_item(dentry->d_parent);
        type = parent_item->ci_type;
        subsys = to_config_group(parent_item)->cg_subsys;
@@ -719,15 +744,16 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        if (!type || !type->ct_group_ops ||
            (!type->ct_group_ops->make_group &&
             !type->ct_group_ops->make_item)) {
-               config_item_put(parent_item);
-               return -EPERM;  /* What lack-of-mkdir returns */
+               ret = -EPERM;  /* Lack-of-mkdir returns -EPERM */
+               goto out_put;
        }
 
        name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
        if (!name) {
-               config_item_put(parent_item);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_put;
        }
+
        snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
 
        down(&subsys->su_sem);
@@ -748,40 +774,67 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        kfree(name);
        if (!item) {
-               config_item_put(parent_item);
-               return -ENOMEM;
+               /*
+                * If item == NULL, then link_obj() was never called.
+                * There are no extra references to clean up.
+                */
+               ret = -ENOMEM;
+               goto out_put;
        }
 
-       ret = -EINVAL;
+       /*
+        * link_obj() has been called (via link_group() for groups).
+        * From here on out, errors must clean that up.
+        */
+
        type = item->ci_type;
-       if (type) {
-               owner = type->ct_owner;
-               if (try_module_get(owner)) {
-                       if (group) {
-                               ret = configfs_attach_group(parent_item,
-                                                           item,
-                                                           dentry);
-                       } else {
-                               ret = configfs_attach_item(parent_item,
-                                                          item,
-                                                          dentry);
-                       }
+       if (!type) {
+               ret = -EINVAL;
+               goto out_unlink;
+       }
 
-                       if (ret) {
-                               down(&subsys->su_sem);
-                               if (group)
-                                       unlink_group(group);
-                               else
-                                       unlink_obj(item);
-                               client_drop_item(parent_item, item);
-                               up(&subsys->su_sem);
+       owner = type->ct_owner;
+       if (!try_module_get(owner)) {
+               ret = -EINVAL;
+               goto out_unlink;
+       }
 
-                               config_item_put(parent_item);
-                               module_put(owner);
-                       }
-               }
+       /*
+        * I hate doing it this way, but if there is
+        * an error,  module_put() probably should
+        * happen after any cleanup.
+        */
+       module_got = 1;
+
+       if (group)
+               ret = configfs_attach_group(parent_item, item, dentry);
+       else
+               ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+       if (ret) {
+               /* Tear down everything we built up */
+               down(&subsys->su_sem);
+               if (group)
+                       unlink_group(group);
+               else
+                       unlink_obj(item);
+               client_drop_item(parent_item, item);
+               up(&subsys->su_sem);
+
+               if (module_got)
+                       module_put(owner);
        }
 
+out_put:
+       /*
+        * link_obj()/link_group() took a reference from child->parent,
+        * so the parent is safely pinned.  We can drop our working
+        * reference.
+        */
+       config_item_put(parent_item);
+
+out:
        return ret;
 }
 
@@ -801,6 +854,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (sd->s_type & CONFIGFS_USET_DEFAULT)
                return -EPERM;
 
+       /* Get a working ref until we have the child */
        parent_item = configfs_get_config_item(dentry->d_parent);
        subsys = to_config_group(parent_item)->cg_subsys;
        BUG_ON(!subsys);
@@ -817,6 +871,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
                return ret;
        }
 
+       /* Get a working ref for the duration of this function */
        item = configfs_get_config_item(dentry);
 
        /* Drop reference from above, item already holds one. */
index d4d0c41490cddad3a99c89727dd848a405693520..1d46677afd172b5742a4ccd2472b4d59f82a2800 100644 (file)
@@ -438,7 +438,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
        if (c->mtd->point) {
                err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
                if (!err && retlen < tn->csize) {
-                       JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+                       JFFS2_WARNING("MTD point returned len too short: %zu "
+                                       "instead of %u.\n", retlen, tn->csize);
                        c->mtd->unpoint(c->mtd, buffer, ofs, len);
                } else if (err)
                        JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@@ -461,7 +462,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
                }
 
                if (retlen != len) {
-                       JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len);
+                       JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n",
+                                       ofs, retlen, len);
                        err = -EIO;
                        goto free_out;
                }
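
Both warnings fixed above came from printing a size_t (retlen) with int-sized conversions; the "z" length modifier is the portable spelling, since size_t is wider than int on 64-bit kernels. A minimal demonstration:

#include <stdio.h>

int main(void)
{
        size_t retlen = 42;

        /* %zu/%zd match size_t/ssize_t on every target; %u or %d
         * would be undefined behavior where size_t is 64-bit. */
        printf("read %zu of %u bytes\n", retlen, 64u);
        return 0;
}
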
index 2c5f1f80bdc28b868cb81aabecdae71bc9c6b480..bf478addb852b70c1ec036e33100f93031c5dc6b 100644 (file)
@@ -899,13 +899,11 @@ static int do_change_type(struct nameidata *nd, int flag)
 /*
  * do loopback mount.
  */
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
 {
        struct nameidata old_nd;
        struct vfsmount *mnt = NULL;
-       int recurse = flags & MS_REC;
        int err = mount_is_safe(nd);
-
        if (err)
                return err;
        if (!old_name || !*old_name)
@@ -939,7 +937,6 @@ static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags
                spin_unlock(&vfsmount_lock);
                release_mounts(&umount_list);
        }
-       mnt->mnt_flags = mnt_flags;
 
 out:
        up_write(&namespace_sem);
@@ -1353,7 +1350,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
                retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
                                    data_page);
        else if (flags & MS_BIND)
-               retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+               retval = do_loopback(&nd, dev_name, flags & MS_REC);
        else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
                retval = do_change_type(&nd, flags);
        else if (flags & MS_MOVE)
index 0d858d0b25be70f496e1fb0ec863ec4778ae3c57..47152bf9a7f26cdfb82cd02240e5546ba3e47485 100644 (file)
@@ -276,13 +276,29 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
        return ret;
 }
 
+/* This can also be called from ocfs2_write_zero_page() which has done
+ * its own cluster locking. */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+                              unsigned from, unsigned to)
+{
+       int ret;
+
+       down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+       up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       return ret;
+}
+
 /*
  * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
  * from loopback.  It must be able to perform its own locking around
  * ocfs2_get_block().
  */
-int ocfs2_prepare_write(struct file *file, struct page *page,
-                       unsigned from, unsigned to)
+static int ocfs2_prepare_write(struct file *file, struct page *page,
+                              unsigned from, unsigned to)
 {
        struct inode *inode = page->mapping->host;
        int ret;
@@ -295,11 +311,7 @@ int ocfs2_prepare_write(struct file *file, struct page *page,
                goto out;
        }
 
-       down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
-       ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
-       up_read(&OCFS2_I(inode)->ip_alloc_sem);
+       ret = ocfs2_prepare_write_nolock(inode, page, from, to);
 
        ocfs2_meta_unlock(inode, 0);
 out:
@@ -625,11 +637,31 @@ static ssize_t ocfs2_direct_IO(int rw,
        int ret;
 
        mlog_entry_void();
+
+       /*
+        * We get PR data locks even for O_DIRECT.  This allows
+        * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
+        * extending and buffered zeroing writes race.  If they did
+        * race then the buffered zeroing could be written back after
+        * the O_DIRECT I/O.  It's one thing to tell people not to mix
+        * buffered and O_DIRECT writes, but expecting them to
+        * understand that file extension is also an implicit buffered
+        * write is too much.  By getting the PR we force writeback of
+        * the buffered zeroing before proceeding.
+        */
+       ret = ocfs2_data_lock(inode, 0);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+       ocfs2_data_unlock(inode, 0);
+
        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
                                            nr_segs, 
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);
+out:
        mlog_exit(ret);
        return ret;
 }
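
This hunk applies the standard lock-layering split: the body of ocfs2_prepare_write() moves into a *_nolock helper, the public entry wraps it in the lock, and ocfs2_write_zero_page(), which already holds the cluster lock, calls the helper directly instead of deadlocking through the public path. A generic sketch of the split, with a pthread mutex standing in for the cluster/ip_alloc_sem locking:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Core logic: assumes the caller already holds the lock. */
static int do_write_nolock(int from, int to)
{
        printf("write %d..%d (lock held by caller)\n", from, to);
        return 0;
}

/* Public entry: takes the lock around the core logic. */
static int do_write(int from, int to)
{
        int ret;

        pthread_mutex_lock(&lock);
        ret = do_write_nolock(from, to);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        do_write(0, 4096);             /* outer-most caller */

        pthread_mutex_lock(&lock);     /* a path that already locks... */
        do_write_nolock(0, 512);       /* ...uses the _nolock variant */
        pthread_mutex_unlock(&lock);
        return 0;
}
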
index d40456d509a00e8ddecac4e28a26d1482140f23f..e88c3f0b8fa9f3bfc5078f992360cd3cbea8f69c 100644 (file)
@@ -22,8 +22,8 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
-int ocfs2_prepare_write(struct file *file, struct page *page,
-                       unsigned from, unsigned to);
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+                              unsigned from, unsigned to);
 
 struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
                                                         struct page *page,
index 4601fc256f111254e41d124d08e95f216357b11e..1a5c69071df642afbe99a04ed348b33c955c9861 100644 (file)
@@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode,
 
        ret = -ENOMEM;
        ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
-                                       GFP_KERNEL);
+                                       GFP_NOFS);
        if (!ctxt.new_ent) {
                mlog_errno(ret);
                return ret;
@@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode,
                if (ctxt.need_left && !ctxt.left_ent) {
                        ctxt.left_ent =
                                kmem_cache_alloc(ocfs2_em_ent_cachep,
-                                                GFP_KERNEL);
+                                                GFP_NOFS);
                        if (!ctxt.left_ent)
                                break;
                }
                if (ctxt.need_right && !ctxt.right_ent) {
                        ctxt.right_ent =
                                kmem_cache_alloc(ocfs2_em_ent_cachep,
-                                                GFP_KERNEL);
+                                                GFP_NOFS);
                        if (!ctxt.right_ent)
                                break;
                }
index 581eb451a41a36796a2a2d55b7463028d4dae10e..a9559c874530ebee3e5ccacefbccc03058af3ca7 100644 (file)
@@ -613,7 +613,8 @@ leave:
 
 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->commit_write(). */
+ * worry about recursive locking in ->prepare_write() and
+ * ->commit_write(). */
 static int ocfs2_write_zero_page(struct inode *inode,
                                 u64 size)
 {
@@ -641,7 +642,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
                goto out;
        }
 
-       ret = ocfs2_prepare_write(NULL, page, offset, offset);
+       ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_unlock;
@@ -695,13 +696,26 @@ out:
        return ret;
 }
 
+/* 
+ * A tail_to_skip value > 0 indicates that we're being called from
+ * ocfs2_file_aio_write(). This has the following implications:
+ *
+ * - we don't want to update i_size
+ * - di_bh will be NULL, which is fine because it's only used in the
+ *   case where we want to update i_size.
+ * - ocfs2_zero_extend() will then only be filling the hole created
+ *   between i_size and the start of the write.
+ */
 static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
-                            u64 new_i_size)
+                            u64 new_i_size,
+                            size_t tail_to_skip)
 {
        int ret = 0;
        u32 clusters_to_add;
 
+       BUG_ON(!tail_to_skip && !di_bh);
+
        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;
@@ -714,27 +728,44 @@ static int ocfs2_extend_file(struct inode *inode,
                OCFS2_I(inode)->ip_clusters;
 
        if (clusters_to_add) {
-               ret = ocfs2_extend_allocation(inode, clusters_to_add);
+               /* 
+                * protect the pages that ocfs2_zero_extend is going to
+                * be pulling into the page cache.. we do this before the
+                * metadata extend so that we don't get into the situation
+                * where we've extended the metadata but can't get the data
+                * lock to zero.
+                */
+               ret = ocfs2_data_lock(inode, 1);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
 
-               ret = ocfs2_zero_extend(inode, new_i_size);
+               ret = ocfs2_extend_allocation(inode, clusters_to_add);
                if (ret < 0) {
                        mlog_errno(ret);
-                       goto out;
+                       goto out_unlock;
                }
-       } 
 
-       /* No allocation required, we just use this helper to
-        * do a trivial update of i_size. */
-       ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
-       if (ret < 0) {
-               mlog_errno(ret);
-               goto out;
+               ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       goto out_unlock;
+               }
+       }
+
+       if (!tail_to_skip) {
+               /* We're being called from ocfs2_setattr() which wants
+                * us to update i_size */
+               ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+               if (ret < 0)
+                       mlog_errno(ret);
        }
 
+out_unlock:
+       if (clusters_to_add) /* this is the only case in which we lock */
+               ocfs2_data_unlock(inode, 1);
+
 out:
        return ret;
 }
@@ -793,7 +824,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                if (i_size_read(inode) > attr->ia_size)
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                else
-                       status = ocfs2_extend_file(inode, bh, attr->ia_size);
+                       status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
@@ -1049,21 +1080,12 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                if (!clusters)
                        break;
 
-               ret = ocfs2_extend_allocation(inode, clusters);
+               ret = ocfs2_extend_file(inode, NULL, newsize, count);
                if (ret < 0) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }
-
-               /* Fill any holes which would've been created by this
-                * write. If we're O_APPEND, this will wind up
-                * (correctly) being a noop. */
-               ret = ocfs2_zero_extend(inode, (u64) newsize - count);
-               if (ret < 0) {
-                       mlog_errno(ret);
-                       goto out;
-               }
                break;
        }
 
@@ -1146,6 +1168,22 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                ocfs2_iocb_set_rw_locked(iocb);
        }
 
+       /*
+        * We're fine letting folks race truncates and extending
+        * writes with read across the cluster, just like they can
+        * locally. Hence no rw_lock during read.
+        * 
+        * Take and drop the meta data lock to update inode fields
+        * like i_size. This allows the checks down below
+        * generic_file_aio_read() a chance of actually working. 
+        */
+       ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto bail;
+       }
+       ocfs2_meta_unlock(inode, 0);
+
        ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
        if (ret == -EINVAL)
                mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
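
Two ordering points in this file are worth drawing out. On extend, the cluster data lock is now taken before the metadata allocation, exactly as the in-line comment says: if the allocation happened first and the data lock then failed, the file would carry allocated but never-zeroed clusters. On read, taking and immediately dropping the meta lock is deliberate; it is not protecting the read (reads may race extends, as they can locally), it only refreshes i_size so the bounds checks inside generic_file_aio_read() see current values. Condensed from the extend hunk above, the ordering is:

	ret = ocfs2_data_lock(inode, 1);			/* 1: pin the data lock first */
	if (ret == 0) {
		ret = ocfs2_extend_allocation(inode, clusters_to_add);	/* 2: metadata */
		if (ret == 0)
			ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip); /* 3 */
		ocfs2_data_unlock(inode, 1);			/* 4: unlock only what we locked */
	}
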
index 6a610ae5358394710688e518d022f25e85a51b41..eebc3cfa6be8c89dd5a55a8910ad4b1f3c9d2827 100644 (file)
@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
 {
        struct ocfs2_journal_handle *retval = NULL;
 
-       retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+       retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
        if (!retval) {
                mlog(ML_ERROR, "Failed to allocate memory for journal "
                     "handle!\n");
@@ -870,9 +870,11 @@ static int ocfs2_force_read_journal(struct inode *inode)
                if (p_blocks > CONCURRENT_JOURNAL_FILL)
                        p_blocks = CONCURRENT_JOURNAL_FILL;
 
+               /* We are reading journal data which should not
+                * be put in the uptodate cache */
                status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
                                           p_blkno, p_blocks, bhs, 0,
-                                          inode);
+                                          NULL);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
@@ -982,7 +984,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
 {
        struct ocfs2_la_recovery_item *item;
 
-       item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+       item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
        if (!item) {
                /* Though we wish to avoid it, we are in fact safe in
                 * skipping local alloc cleanup as fsck.ocfs2 is more
index 04a684dfdd96be735036db62b4c67a1a7cb65197..b8a00a79332676dd8956a10180926a2b580a7213 100644 (file)
@@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
             (unsigned long long)oi->ip_blkno,
             (unsigned long long)block, expand_tree);
 
-       new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+       new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
        if (!new) {
                mlog_errno(-ENOMEM);
                return;
@@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
                 * has no way of tracking that. */
                for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
                        tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
-                                                  GFP_KERNEL);
+                                                  GFP_NOFS);
                        if (!tree[i]) {
                                mlog_errno(-ENOMEM);
                                goto out_free;
index 53049a20419768b3c00eea23fb6acf4ff4c71c0a..ee42765a8553e9c5ed51e47805f9bd1440679399 100644 (file)
@@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
 {
        struct ocfs2_net_wait_ctxt *w;
 
-       w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+       w = kcalloc(1, sizeof(*w), GFP_NOFS);
        if (!w) {
                mlog_errno(-ENOMEM);
                goto bail;
@@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
 
        BUG_ON(!ocfs2_is_valid_vote_request(type));
 
-       request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+       request = kcalloc(1, sizeof(*request), GFP_NOFS);
        if (!request) {
                mlog_errno(-ENOMEM);
        } else {
@@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg,
        struct ocfs2_super *osb = data;
        struct ocfs2_vote_work *work;
 
-       work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+       work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
        if (!work) {
                status = -ENOMEM;
                mlog_errno(status);
index 53ec28c367770d5704ecdb24f7549470cb0ccf6a..317b7c7f38a73aac8cc8f3edc463919dabb2f947 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -1124,7 +1124,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
        prevent_tail_call(ret);
        return ret;
 }
-EXPORT_SYMBOL_GPL(sys_openat);
 
 #ifndef __alpha__
 
index 34c7a11d91f0ebbb29854a0116d8c558825fbbbc..70d9c5a37f5a3c6f7d5cad12e7bad8e1d2e21136 100644 (file)
@@ -434,6 +434,11 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
        if (dentry->d_name.len > SMB_MAXNAMELEN)
                goto out;
 
+       /* Do not allow lookup of names with backslashes in */
+       error = -EINVAL;
+       if (memchr(dentry->d_name.name, '\\', dentry->d_name.len))
+               goto out;
+
        lock_kernel();
        error = smb_proc_getattr(dentry, &finfo);
 #ifdef SMBFS_PARANOIA
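
The backslash is the path separator on the SMB wire protocol, so a dentry name containing one would be spliced into the remote path and could resolve to a different object than the directory entry it claims to be; refusing such names at lookup time closes that off. The check in isolation (identifier names illustrative):

	/* Reject any path component holding the SMB separator. */
	if (memchr(dentry->d_name.name, '\\', dentry->d_name.len))
		return -EINVAL;
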
index c71c375863cc473a254462d8187365cecb302367..c71dd2760d3254da61b60a06ea2a9732837ea072 100644 (file)
@@ -339,9 +339,11 @@ int smb_add_request(struct smb_request *req)
                /*
                 * On timeout or on interrupt we want to try and remove the
                 * request from the recvq/xmitq.
+                * First check if the request is still part of a queue. (May
+                * have been removed by some error condition)
                 */
                smb_lock_server(server);
-               if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
+               if (!list_empty(&req->rq_queue)) {
                        list_del_init(&req->rq_queue);
                        smb_rput(req);
                }
diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h
new file mode 100644 (file)
index 0000000..1e70908
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PXA2XX_SPI_H_
+#define PXA2XX_SPI_H_
+
+#define PXA2XX_CS_ASSERT (0x01)
+#define PXA2XX_CS_DEASSERT (0x02)
+
+#if defined(CONFIG_PXA25x)
+#define CLOCK_SPEED_HZ 3686400
+#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00)
+#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#elif defined(CONFIG_PXA27x)
+#define CLOCK_SPEED_HZ 13000000
+#define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00)
+#endif
+
+#define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1)))))
+#define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2)))))
+#define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3)))))
+
+enum pxa_ssp_type {
+       SSP_UNDEFINED = 0,
+       PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
+       PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
+       PXA27x_SSP,
+};
+
+/* device.platform_data for SSP controller devices */
+struct pxa2xx_spi_master {
+       enum pxa_ssp_type ssp_type;
+       u32 clock_enable;
+       u16 num_chipselect;
+       u8 enable_dma;
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct pxa2xx_spi_chip {
+       u8 tx_threshold;
+       u8 rx_threshold;
+       u8 dma_burst_size;
+       u32 timeout_microsecs;
+       u8 enable_loopback;
+       void (*cs_control)(u32 command);
+};
+
+#endif /*PXA2XX_SPI_H_*/
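
A board file consumes these structures through platform data: pxa2xx_spi_master describes the SSP controller itself, while pxa2xx_spi_chip rides in spi_board_info.controller_data for per-chip FIFO, DMA and chipselect tuning. A hedged sketch of the board wiring; CKEN3_SSP, the GPIO number, and all numeric values here are assumptions for illustration, not taken from this patch:

	static void hypothetical_cs_control(u32 command)
	{
		/* assumed active-low chipselect on GPIO 24 */
		if (command & PXA2XX_CS_ASSERT)
			GPCR(24) = GPIO_bit(24);	/* drive low: assert */
		if (command & PXA2XX_CS_DEASSERT)
			GPSR(24) = GPIO_bit(24);	/* drive high: deassert */
	}

	static struct pxa2xx_spi_master pxa_ssp_master_info = {
		.ssp_type	= PXA25x_SSP,
		.clock_enable	= CKEN3_SSP,		/* assumed clock-enable bit */
		.num_chipselect	= 1,
		.enable_dma	= 1,
	};

	static struct pxa2xx_spi_chip hypothetical_chip_info = {
		.tx_threshold	= 8,
		.rx_threshold	= 8,
		.dma_burst_size	= 8,
		.timeout_microsecs = 235,
		.cs_control	= hypothetical_cs_control,
	};
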
index a9c75b2c314f588bc279e9b59f8de3e72408cad8..842526055225a02e88a7cc9d8ed34a8c49ef8c21 100644 (file)
@@ -45,8 +45,6 @@ extern unsigned int elf_hwcap;
 
 #endif /* __ASSEMBLY__ */
 
-#define PROC_INFO_SZ   48
-
 #define HWCAP_SWP      1
 #define HWCAP_HALF     2
 #define HWCAP_THUMB    4
index 43ad4e55878c7961a535e6ce427b842adecb2372..406ca97a8ab29f99d381565ab3c9b745c1f147a3 100644 (file)
@@ -142,6 +142,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
        : "cc");
 }
 
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x)                ((x)->lock == 0x80000000)
+
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -198,4 +201,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 
 #define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x)         ((x)->lock < 0x80000000)
+
 #endif /* __ASM_SPINLOCK_H */
index 657d582e8149504e331e65dc7e30cf85aceb7117..41c2792ff6b04cd9d0be3d98c774549113ba2552 100644 (file)
 #define __NR_pselect6          301
 #define __NR_ppoll             302
 #define __NR_unshare           303
+#define __NR_set_robust_list   304
+#define __NR_get_robust_list   305
+#define __NR_splice            306
+#define __NR_sync_file_range   307
+#define __NR_tee               308
+#define __NR_vmsplice          309
 
-#define NR_syscalls 304
+#define NR_syscalls 310
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index e1bd0842f6a13e71ad0b781d4a5bdf6605986fbe..f4fc576ed4c40c6d28dd940a869801a138a0a5e6 100644 (file)
@@ -124,6 +124,7 @@ extern int get_option(char **str, int *pint);
 extern char *get_options(const char *str, int nints, int *ints);
 extern unsigned long long memparse(char *ptr, char **retptr);
 
+extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
 extern int session_of_pgrp(int pgrp);
index bdc556d884989c9e51f6c46b915f4efb146e162b..03a14a30c46a1230d57443007783e466b519450b 100644 (file)
@@ -69,6 +69,7 @@ struct mmc_data {
        unsigned int            timeout_ns;     /* data timeout (in ns, max 80ms) */
        unsigned int            timeout_clks;   /* data timeout (in clocks) */
        unsigned int            blksz_bits;     /* data block size */
+       unsigned int            blksz;          /* data block size */
        unsigned int            blocks;         /* number of blocks */
        unsigned int            error;          /* data error */
        unsigned int            flags;
index 5673008b61e11ae127608228fdbe6eacd9cec27d..970284f571a6d522f60362c0f585ba7d837b1cb2 100644 (file)
@@ -132,6 +132,7 @@ static inline void rcu_bh_qsctr_inc(int cpu)
 }
 
 extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
index 3af03b19c9830275c77ef99f98c6a22e76a79a7f..2d985d59c7b86759663761c3ec333ec196730026 100644 (file)
@@ -150,6 +150,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kfree(const void *);
 extern unsigned int ksize(const void *);
+extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
index b05f1463a2676de35237dda137772ad70dc1c93d..e928c0dcc29755d31c2ba5b7f1d96b1f3ac50b1e 100644 (file)
@@ -31,18 +31,23 @@ extern struct bus_type spi_bus_type;
  * @master: SPI controller used with the device.
  * @max_speed_hz: Maximum clock rate to be used with this chip
  *     (on this board); may be changed by the device's driver.
+ *     The spi_transfer.speed_hz can override this for each transfer.
  * @chip-select: Chipselect, distinguishing chips handled by "master".
  * @mode: The spi mode defines how data is clocked out and in.
  *     This may be changed by the device's driver.
+ *     The "active low" default for chipselect mode can be overridden,
+ *     as can the "MSB first" default for each word in a transfer.
  * @bits_per_word: Data transfers involve one or more words; word sizes
- *     like eight or 12 bits are common.  In-memory wordsizes are
+ *     like eight or 12 bits are common.  In-memory wordsizes are
  *     powers of two bytes (e.g. 20 bit samples use 32 bits).
- *     This may be changed by the device's driver.
+ *     This may be changed by the device's driver, or left at the
+ *     default (0) indicating protocol words are eight bit bytes.
+ *     The spi_transfer.bits_per_word can override this for each transfer.
  * @irq: Negative, or the number passed to request_irq() to receive
- *     interrupts from this device.
+ *     interrupts from this device.
  * @controller_state: Controller's runtime state
  * @controller_data: Board-specific definitions for controller, such as
- *     FIFO initialization parameters; from board_info.controller_data
+ *     FIFO initialization parameters; from board_info.controller_data
  *
  * An spi_device is used to interchange data between an SPI slave
  * (usually a discrete chip) and CPU memory.
@@ -65,6 +70,7 @@ struct spi_device {
 #define        SPI_MODE_2      (SPI_CPOL|0)
 #define        SPI_MODE_3      (SPI_CPOL|SPI_CPHA)
 #define        SPI_CS_HIGH     0x04                    /* chipselect active high? */
+#define        SPI_LSB_FIRST   0x08                    /* per-word bits-on-wire */
        u8                      bits_per_word;
        int                     irq;
        void                    *controller_state;
@@ -73,7 +79,6 @@ struct spi_device {
 
        // likely need more hooks for more protocol options affecting how
        // the controller talks to each chip, like:
-       //  - bit order (default is wordwise msb-first)
        //  - memory packing (12 bit samples into low bits, others zeroed)
        //  - priority
        //  - drop chipselect after each word
@@ -143,13 +148,13 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * struct spi_master - interface to SPI master controller
  * @cdev: class interface to this driver
  * @bus_num: board-specific (and often SOC-specific) identifier for a
- *     given SPI controller.
+ *     given SPI controller.
  * @num_chipselect: chipselects are used to distinguish individual
- *     SPI slaves, and are numbered from zero to num_chipselects.
- *     each slave has a chipselect signal, but it's common that not
- *     every chipselect is connected to a slave.
+ *     SPI slaves, and are numbered from zero to num_chipselects.
+ *     each slave has a chipselect signal, but it's common that not
+ *     every chipselect is connected to a slave.
  * @setup: updates the device mode and clocking records used by a
- *     device's SPI controller; protocol code may call this.
+ *     device's SPI controller; protocol code may call this.
  * @transfer: adds a message to the controller's transfer queue.
  * @cleanup: frees controller-specific state
  *
@@ -167,13 +172,13 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 struct spi_master {
        struct class_device     cdev;
 
-       /* other than zero (== assign one dynamically), bus_num is fully
+       /* other than negative (== assign one dynamically), bus_num is fully
         * board-specific.  usually that simplifies to being SOC-specific.
-        * example:  one SOC has three SPI controllers, numbered 1..3,
+        * example:  one SOC has three SPI controllers, numbered 0..2,
         * and one board's schematics might show it using SPI-2.  software
         * would normally use bus_num=2 for that controller.
         */
-       u16                     bus_num;
+       s16                     bus_num;
 
        /* chipselects will be integral to many controllers; some others
         * might use board-specific GPIOs.
@@ -268,10 +273,14 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
  * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
  * @len: size of rx and tx buffers (in bytes)
+ * @speed_hz: Select a speed other than the device default for this
+ *      transfer. If 0 the default (from spi_device) is used.
+ * @bits_per_word: select a bits_per_word other than the device default
+ *      for this transfer. If 0 the default (from spi_device) is used.
  * @cs_change: affects chipselect after this transfer completes
  * @delay_usecs: microseconds to delay after this transfer before
- *     (optionally) changing the chipselect status, then starting
- *     the next transfer or completing this spi_message.
+ *     (optionally) changing the chipselect status, then starting
+ *     the next transfer or completing this spi_message.
  * @transfer_list: transfers are sequenced through spi_message.transfers
  *
  * SPI transfers always write the same number of bytes as they read.
@@ -322,7 +331,9 @@ struct spi_transfer {
        dma_addr_t      rx_dma;
 
        unsigned        cs_change:1;
+       u8              bits_per_word;
        u16             delay_usecs;
+       u32             speed_hz;
 
        struct list_head transfer_list;
 };
@@ -356,7 +367,7 @@ struct spi_transfer {
  * and its transfers, ignore them until its completion callback.
  */
 struct spi_message {
-       struct list_head        transfers;
+       struct list_head        transfers;
 
        struct spi_device       *spi;
 
@@ -374,7 +385,7 @@ struct spi_message {
         */
 
        /* completion is reported through a callback */
-       void                    (*complete)(void *context);
+       void                    (*complete)(void *context);
        void                    *context;
        unsigned                actual_length;
        int                     status;
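
With speed_hz and bits_per_word now carried per transfer, a protocol driver can mix clock rates and word sizes inside a single message instead of calling spi_setup() between transfers; a zero in either field falls back to the spi_device default. A minimal usage sketch (buffer contents and values are illustrative):

	u16 samples[2] = { 0x0123, 0x0456 };	/* 12-bit words in 16-bit slots */
	struct spi_message m;
	struct spi_transfer t = {
		.tx_buf		= samples,
		.len		= sizeof(samples),	/* length stays in bytes */
		.speed_hz	= 1000000,	/* per-transfer clock, 0 == device default */
		.bits_per_word	= 12,		/* per-transfer word size, 0 == device default */
	};
	int status;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);	/* spi: the probed spi_device */
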
index c961fe9bf3eb48f3576035a7ec62bc5c743bc008..16ce178f54d7a0de40a69ebabfdddc488f4039fa 100644 (file)
@@ -30,6 +30,12 @@ struct spi_bitbang {
 
        struct spi_master       *master;
 
+       /* setup_transfer() changes clock and/or wordsize to match settings
+        * for this transfer; zeroes restore defaults from spi_device.
+        */
+       int     (*setup_transfer)(struct spi_device *spi,
+                       struct spi_transfer *t);
+
        void    (*chipselect)(struct spi_device *spi, int is_on);
 #define        BITBANG_CS_ACTIVE       1       /* normally nCS, active low */
 #define        BITBANG_CS_INACTIVE     0
@@ -51,6 +57,8 @@ struct spi_bitbang {
 extern int spi_bitbang_setup(struct spi_device *spi);
 extern void spi_bitbang_cleanup(const struct spi_device *spi);
 extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m);
+extern int spi_bitbang_setup_transfer(struct spi_device *spi,
+                                     struct spi_transfer *t);
 
 /* start or stop queue processing */
 extern int spi_bitbang_start(struct spi_bitbang *spi);
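
A bitbang-based controller driver opts into these per-transfer overrides by filling the new hook before starting the queue; the exported spi_bitbang_setup_transfer() is the stock implementation for drivers with no extra hardware constraints of their own. Sketch, assuming hw is the driver's private state holding its spi_bitbang:

	struct spi_bitbang *bitbang = &hw->bitbang;	/* hw: hypothetical driver state */
	int status;

	bitbang->setup_transfer = spi_bitbang_setup_transfer;	/* stock per-transfer hook */
	status = spi_bitbang_start(bitbang);
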
index 5b1fdf1cff4f057a67eb1c81e487acad031a89b2..f03c247193022ae3ec7ee7457db7ce3f44cbce80 100644 (file)
@@ -296,7 +296,7 @@ static inline void disable_swap_token(void)
 #define read_swap_cache_async(swp,vma,addr)    NULL
 #define lookup_swap_cache(swp)                 NULL
 #define valid_swaphandles(swp, off)            0
-#define can_share_swap_page(p)                 0
+#define can_share_swap_page(p)                 (page_mapcount(p) == 1)
 #define move_to_swap_cache(p, swp)             1
 #define move_from_swap_cache(p, i, m)          1
 #define __delete_from_swap_cache(p)            /*NOTHING*/
index 34a1a09e5aeff93927fb058be18e79e6afbf32df..807d6f1ef4b505bd302de0281aee75fc71a2b50a 100644 (file)
@@ -99,6 +99,7 @@ typedef enum {
        SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
        SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
        SCTP_CMD_FORCE_PRIM_RETRAN,  /* Forces retrans. over primary path. */
+       SCTP_CMD_SET_SK_ERR,     /* Set sk_err */
        SCTP_CMD_LAST
 } sctp_verb_t;
 
index e673b2c984e931c9a80d7c5e677bfdc1984fa4c3..aa6033ca7cd859d3614406a54b0d4885e79a94dc 100644 (file)
@@ -461,12 +461,12 @@ static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu)
  * there is room for a param header too.
  */
 #define sctp_walk_params(pos, chunk, member)\
-_sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member)
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
 
 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
      pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
-     pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\
+     pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
      pos.v += WORD_ROUND(ntohs(pos.p->length)))
 
@@ -477,7 +477,7 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
            sizeof(sctp_chunkhdr_t));\
      (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
-     (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\
+     (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
      err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
 
index adb7cad3e6eec17165efdf88acf1f0f936fc4c9d..f4b7b9d278cd3bf040cdac1a1ee123401549f103 100644 (file)
@@ -310,6 +310,11 @@ retry:
 
                panic("VFS: Unable to mount root fs on %s", b);
        }
+
+       printk("No filesystem could mount root, tried: ");
+       for (p = fs_names; *p; p += strlen(p)+1)
+               printk(" %s", p);
+       printk("\n");
        panic("VFS: Unable to mount root fs on %s", __bdevname(ROOT_DEV, b));
 out:
        putname(fs_names);
index 679d870d991b760438a95d133c9684b9f381eb0d..f81cfa40a719858a003ab50448f3d0925005d5ff 100644 (file)
@@ -26,10 +26,12 @@ static void __init free(void *where)
 
 /* link hash */
 
+#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
+
 static __initdata struct hash {
        int ino, minor, major;
        struct hash *next;
-       char *name;
+       char name[N_ALIGN(PATH_MAX)];
 } *head[32];
 
 static inline int hash(int major, int minor, int ino)
@@ -57,7 +59,7 @@ static char __init *find_link(int major, int minor, int ino, char *name)
        q->ino = ino;
        q->minor = minor;
        q->major = major;
-       q->name = name;
+       strcpy(q->name, name);
        q->next = NULL;
        *p = q;
        return NULL;
@@ -133,8 +135,6 @@ static inline void eat(unsigned n)
        count -= n;
 }
 
-#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
-
 static __initdata char *collected;
 static __initdata int remains;
 static __initdata char *collect;
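
The hardlink hash previously stored only a pointer to the name, but that pointer aimed into the parser's reusable collection buffer, so by the time a later link was checked the bytes could describe a different archive member; copying into a fixed in-struct array gives each hash entry a stable name of its own. N_ALIGN itself pads a cpio "newc" name so that the 110-byte header plus name lands on a 4-byte boundary (110 % 4 == 2, hence the trailing +2):

	/* N_ALIGN keeps (110-byte header + name) a multiple of 4:
	 *   N_ALIGN(5) == ((5 + 1) & ~3) + 2 ==  6;  (110 +  6) % 4 == 0
	 *   N_ALIGN(7) == ((7 + 1) & ~3) + 2 == 10;  (110 + 10) % 4 == 0
	 */
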
index 7501b531ceedab97f969402822be75d6819b042f..7fe2628553172eadc0d49a3cbaf9d779b80b5a7c 100644 (file)
@@ -40,7 +40,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
        return e;
 }
 
-static int core_kernel_text(unsigned long addr)
+int core_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_stext &&
            addr <= (unsigned long)_etext)
index d24deb0dbbc9488e91ba855342399c22c3f9d309..bbe04862e1b09113dd12dd49749f551cd9fbeecf 100644 (file)
@@ -705,14 +705,14 @@ EXPORT_SYMBOL(__symbol_put);
 
 void symbol_put_addr(void *addr)
 {
-       unsigned long flags;
+       struct module *modaddr;
 
-       spin_lock_irqsave(&modlist_lock, flags);
-       if (!kernel_text_address((unsigned long)addr))
-               BUG();
+       if (core_kernel_text((unsigned long)addr))
+               return;
 
-       module_put(module_text_address((unsigned long)addr));
-       spin_unlock_irqrestore(&modlist_lock, flags);
+       if (!(modaddr = module_text_address((unsigned long)addr)))
+               BUG();
+       module_put(modaddr);
 }
 EXPORT_SYMBOL_GPL(symbol_put_addr);
 
index 6d32ff26f9484190f4588107277d08f7dee40909..2058f88c7bbb3d9c9d0c7e195f3d67f25d8f64ed 100644 (file)
@@ -479,12 +479,31 @@ static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
        return 0;
 }
 
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so.  This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
 int rcu_pending(int cpu)
 {
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
 }
 
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+       return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+}
+
 void rcu_check_callbacks(int cpu, int user)
 {
        if (user || 
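
rcu_pending() answers "is there RCU work right now", while the new rcu_needs_cpu() additionally reports callbacks still waiting on a future grace period; the distinction matters to tickless-idle code, which must not stop a CPU's tick while that CPU still owes RCU progress. A hedged sketch of the intended consumer (stop_tick() is a hypothetical arch helper, not part of this patch):

	/* Since rcu_needs_cpu() already folds in rcu_pending(), one check
	 * suffices before parking the per-cpu tick. */
	if (!rcu_needs_cpu(cpu))
		stop_tick(cpu);	/* hypothetical: RCU needs nothing from this CPU */
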
index ea77c999047ec634746d3b6457724bcb7c5eac5c..813b4ec1298a23ee10e0d091305bd8520ba6fad0 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/mempolicy.h>
 
 #include <asm/tlbflush.h>
+#include <asm/div64.h>
 #include "internal.h"
 
 /*
@@ -2566,9 +2567,11 @@ void setup_per_zone_pages_min(void)
        }
 
        for_each_zone(zone) {
-               unsigned long tmp;
+               u64 tmp;
+
                spin_lock_irqsave(&zone->lru_lock, flags);
-               tmp = (pages_min * zone->present_pages) / lowmem_pages;
+               tmp = (u64)pages_min * zone->present_pages;
+               do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
                        /*
                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2595,8 +2598,8 @@ void setup_per_zone_pages_min(void)
                        zone->pages_min = tmp;
                }
 
-               zone->pages_low   = zone->pages_min + tmp / 4;
-               zone->pages_high  = zone->pages_min + tmp / 2;
+               zone->pages_low   = zone->pages_min + (tmp >> 2);
+               zone->pages_high  = zone->pages_min + (tmp >> 1);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
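
The switch to u64 arithmetic with do_div() is an overflow fix: on 32-bit kernels, pages_min * zone->present_pages is an unsigned long product, and a large zone pushes it past 2^32, so the watermarks were computed from a truncated value. Illustrative numbers (assumed, not from the patch):

	#include <asm/div64.h>

	unsigned long pages_min = 4096;		/* assumed values for illustration */
	unsigned long present_pages = 2000000;	/* ~8GiB of 4KiB pages in one zone */
	unsigned long lowmem_pages = 250000;
	u64 tmp = (u64)pages_min * present_pages;	/* 8.192e9: overflows a 32-bit ulong */
	do_div(tmp, lowmem_pages);			/* 64/32 divide, safe on 32-bit */
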
 
index c32af7e7581ef0ff11ef112f94b6a84da50cafc4..d31a06bfbea5c78998e3c68a5a484b98b3e4579f 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -700,6 +700,14 @@ static enum {
        FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+       return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -2192,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
        check_irq_on();
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
-               if (l3) {
+               if (l3 && l3->alien)
+                       drain_alien_cache(cachep, l3->alien);
+       }
+
+       for_each_online_node(node) {
+               l3 = cachep->nodelists[node];
+               if (l3)
                        drain_array(cachep, l3, l3->shared, 1, node);
-                       if (l3->alien)
-                               drain_alien_cache(cachep, l3->alien);
-               }
        }
 }
 
index d7c32de99ee8c49d0e850a06084d5efe508a7625..c5e89eb9ac8ff10e1e15fced114de41058df4a7e 100644 (file)
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid)
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);
 
-       if (system_state == SYSTEM_RUNNING)
+       if (slab_is_available())
                section = kmalloc_node(array_size, GFP_KERNEL, nid);
        else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);
index afd8385c0c9c319bff792f714cc3326881db95d9..e9dc803f2fe0a67de78c4219227f4876352cd89a 100644 (file)
@@ -643,6 +643,5 @@ static int __init rif_init(void)
 
 module_init(rif_init);
 
-EXPORT_SYMBOL(tr_source_route);
 EXPORT_SYMBOL(tr_type_trans);
 EXPORT_SYMBOL(alloc_trdev);
index d159c92cca84a0482757c5d58f12bd73a193adfa..466ed3440b7446b3c6d976963b3056eb7f6dc9f5 100644 (file)
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 
        if (info->bitmask & EBT_LOG_NFLOG)
                nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
-                             info->prefix);
+                             "%s", info->prefix);
        else
                ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
                               info->prefix);
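
Passing info->prefix directly where nf_log_packet() expects its printf-style format is a classic format-string bug: the prefix comes from userspace via the ebtables ruleset, so any '%' specifiers in it would be interpreted, reading memory (and, with %n, potentially writing it). Supplying a constant "%s" format demotes the prefix to plain data; the same one-argument rule as printk, and the same fix recurs below for ipt_LOG and ip6t_LOG:

	printk(KERN_INFO "%s", untrusted);	/* safe: untrusted is plain data */
	printk(KERN_INFO untrusted);		/* unsafe: '%' specifiers get interpreted */
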
index c2d92f99a2b8b0b18bc1f17e1ec742a2c6051de5..d0d19192026d9eb2327cf68a47a4ba1b23dc7fe5 100644 (file)
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len)
 
        write_lock_bh(&t->lock);
        private = t->private;
-       if (private->number != paddc->num_counters) {
+       if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
index 6c4899d8046a5b3265555869e9c8528f3a5553c2..96ceabaec4027932854f46479b112d45a4ac53d6 100644 (file)
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple,
             const union ip_conntrack_manip_proto *min,
             const union ip_conntrack_manip_proto *max)
 {
-       u_int32_t key;
+       __be16 key;
 
        if (maniptype == IP_NAT_MANIP_SRC)
                key = tuple->src.u.gre.key;
        else
                key = tuple->dst.u.gre.key;
 
-       return ntohl(key) >= ntohl(min->gre.key)
-               && ntohl(key) <= ntohl(max->gre.key);
+       return ntohs(key) >= ntohs(min->gre.key)
+               && ntohs(key) <= ntohs(max->gre.key);
 }
 
 /* generate unique tuple ... */
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
                min = 1;
                range_size = 0xffff;
        } else {
-               min = ntohl(range->min.gre.key);
-               range_size = ntohl(range->max.gre.key) - min + 1;
+               min = ntohs(range->min.gre.key);
+               range_size = ntohs(range->max.gre.key) - min + 1;
        }
 
        DEBUGP("min = %u, range_size = %u\n", min, range_size); 
 
        for (i = 0; i < range_size; i++, key++) {
-               *keyptr = htonl(min + key % range_size);
+               *keyptr = htons(min + key % range_size);
                if (!ip_nat_used_tuple(tuple, conntrack))
                        return 1;
        }
index 39fd4c2a2386248421a74795665bd50dda749293..b98f7b08b084586478ec431ec83485aa6118ff8c 100644 (file)
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb,
 
        if (loginfo->logflags & IPT_LOG_NFLOG)
                nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
-                             loginfo->prefix);
+                             "%s", loginfo->prefix);
        else
                ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
                               loginfo->prefix);
index 1438432857021b1ebd09486362cd9243f5f9554e..b847ee409efbb7146987f526998ccb8ea51422fe 100644 (file)
@@ -821,6 +821,7 @@ checkentry(const char *tablename,
        /* Create our proc 'status' entry. */
        curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
        if (!curr_table->status_proc) {
+               vfree(hold);
                printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
                /* Destroy the created table */
                spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@ checkentry(const char *tablename,
                spin_unlock_bh(&recent_lock);
                vfree(curr_table->time_info);
                vfree(curr_table->hash_table);
-               vfree(hold);
                vfree(curr_table->table);
                vfree(curr_table);
                return 0;
index 9f0cca4c4fae2021e52ee5655f9b89b9ade7f753..4a538bc1683d69827f46952a537841bb4360b58c 100644 (file)
@@ -1662,6 +1662,8 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
                        if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                                tp->lost_out += tcp_skb_pcount(skb);
+                               if (IsReno(tp))
+                                       tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
 
                                /* clear xmit_retrans hint */
                                if (tp->retransmit_skb_hint &&
index 0a673038344fe30142e0d095f3f6bb5c6a8854a8..2e72f89a7019929f2da18cb74ef1ba26e9d1d74b 100644 (file)
@@ -1103,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len)
 
        write_lock_bh(&t->lock);
        private = t->private;
-       if (private->number != paddc->num_counters) {
+       if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
index a96c0de14b00ed963b52a221cbaca428b46f483b..73c6300109d61d7243e89f5268d3d9fbe3ec3e30 100644 (file)
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb,
 
        if (loginfo->logflags & IP6T_LOG_NFLOG)
                nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
-                             loginfo->prefix);
+                             "%s", loginfo->prefix);
        else
                ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
                                loginfo->prefix);
index 94dbdb8b458d01fa1dc85cd42da93e5d84b85024..4f6b84c8f4ab6567cc70301d7bdcc2460eabb38a 100644 (file)
@@ -40,7 +40,7 @@ match(const struct sk_buff *skb,
 
        memset(eui64, 0, sizeof(eui64));
 
-       if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+       if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
                if (skb->nh.ipv6h->version == 0x6) {
                        memcpy(eui64, eth_hdr(skb)->h_source, 3);
                        memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
index 2dbf134d5266068e633896e909a71d2f5572c8b4..811d998725bc6728bec208ad90ef81157f8235a0 100644 (file)
@@ -944,9 +944,9 @@ out:
        return rc;
 }
 
-static int ipx_map_frame_type(unsigned char type)
+static __be16 ipx_map_frame_type(unsigned char type)
 {
-       int rc = 0;
+       __be16 rc = 0;
 
        switch (type) {
        case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX);          break;
index 67774448efd966eb801dc822d0788e09a530be54..a394c6fe19a26839e07dbed3f7015224d03c1161 100644 (file)
@@ -119,7 +119,7 @@ out:
        return rc;
 }
 
-static int ipxrtr_delete(long net)
+static int ipxrtr_delete(__u32 net)
 {
        struct ipx_route *r, *tmp;
        int rc;
index c60273cad778ed1c3cdc7eda166aafc6b21d9802..61cdda4e5d3ba8b25f02af19a7bbb634fc9a0d84 100644 (file)
@@ -321,7 +321,7 @@ static int
 nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 {
        spin_lock_bh(&inst->lock);
-       inst->flags = ntohs(flags);
+       inst->flags = flags;
        spin_unlock_bh(&inst->lock);
 
        return 0;
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
        if (nfula[NFULA_CFG_FLAGS-1]) {
                u_int16_t flags =
                        *(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
-               nfulnl_set_flags(inst, ntohl(flags));
+               nfulnl_set_flags(inst, ntohs(flags));
        }
 
 out_put:
index 31eb83717c267a799b013f073cfe3e99301709d9..138ea92ed268457b99d9c6b44a1a9580cc7c016a 100644 (file)
@@ -193,8 +193,10 @@ static void dev_watchdog(unsigned long arg)
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        if (netif_queue_stopped(dev) &&
-                           (jiffies - dev->trans_start) > dev->watchdog_timeo) {
-                               printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
+                           time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+
+                               printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+                                      dev->name);
                                dev->tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
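
The open-coded "(jiffies - dev->trans_start) > dev->watchdog_timeo" test misbehaves when trans_start is marginally ahead of the sampled jiffies (a concurrent transmit on another CPU updating it): the unsigned subtraction turns that small negative delta into an enormous positive one and fires a spurious watchdog. time_after() compares in signed form, so "slightly in the future" correctly reads as "not yet", and it also stays correct across jiffies wraparound (every ~49.7 days at HZ=1000 on 32-bit):

	/* Simplified from <linux/jiffies.h>; the real macro adds typechecks. */
	#define time_after(a, b)	((long)(b) - (long)(a) < 0)

	if (time_after(jiffies, dev->trans_start + dev->watchdog_timeo))
		dev->tx_timeout(dev);	/* fires only once jiffies truly passes the deadline */
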
index d117ebc75cf87ce25aa8699f61b4338b12fc1053..1662f9cc869e0a1cd463b7f3575c0c243dfc4ded 100644 (file)
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association(
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt);
 
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb)
         */
        if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
        {
-               sock_put(sk);
                if (asoc) {
                        sctp_association_put(asoc);
                        asoc = NULL;
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb)
                sk = sctp_get_ctl_sock();
                ep = sctp_sk(sk)->ep;
                sctp_endpoint_hold(ep);
-               sock_hold(sk);
                rcvr = &ep->base;
        }
 
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb)
         */
        sctp_bh_lock_sock(sk);
 
-       /* It is possible that the association could have moved to a different
-        * socket if it is peeled off. If so, update the sk.
-        */ 
-       if (sk != rcvr->sk) {
-               sctp_bh_lock_sock(rcvr->sk);
-               sctp_bh_unlock_sock(sk);
-               sk = rcvr->sk;
-       }
-
        if (sock_owned_by_user(sk))
-               sk_add_backlog(sk, skb);
+               sctp_add_backlog(sk, skb);
        else
-               sctp_backlog_rcv(sk, skb);
+               sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 
-       /* Release the sock and the sock ref we took in the lookup calls.
-        * The asoc/ep ref will be released in sctp_backlog_rcv.
-        */
        sctp_bh_unlock_sock(sk);
-       sock_put(sk);
+
+       /* Release the asoc/ep ref we took in the lookup calls. */
+       if (asoc)
+               sctp_association_put(asoc);
+       else
+               sctp_endpoint_put(ep);
 
        return 0;
 
@@ -280,8 +273,7 @@ discard_it:
        return 0;
 
 discard_release:
-       /* Release any structures we may be holding. */
-       sock_put(sk);
+       /* Release the asoc/ep ref we took in the lookup calls. */
        if (asoc)
                sctp_association_put(asoc);
        else
@@ -290,56 +282,87 @@ discard_release:
        goto discard_it;
 }
 
-/* Handle second half of inbound skb processing.  If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog).   If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket.  Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
  */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-       struct sctp_inq *inqueue = NULL;
+       struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
        struct sctp_ep_common *rcvr = NULL;
+       int backloged = 0;
 
        rcvr = chunk->rcvr;
 
-       BUG_TRAP(rcvr->sk == sk);
-
-       if (rcvr->dead) {
-               sctp_chunk_free(chunk);
-       } else {
-               inqueue = &chunk->rcvr->inqueue;
-               sctp_inq_push(inqueue, chunk);
-       }
-
-       /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */ 
-       if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-               sctp_association_put(sctp_assoc(rcvr));
-       else
-               sctp_endpoint_put(sctp_ep(rcvr));
-  
+       /* If the rcvr is dead then the association or endpoint
+        * has been deleted and we can safely drop the chunk
+        * and refs that we are holding.
+        */
+       if (rcvr->dead) {
+               sctp_chunk_free(chunk);
+               goto done;
+       }
+
+       if (unlikely(rcvr->sk != sk)) {
+               /* In this case, the association moved from one socket to
+                * another.  We are currently sitting on the backlog of the
+                * old socket, so we need to move.
+                * However, since we are here in process context we
+                * need to make sure that the user doesn't own
+                * the new socket when we process the packet.
+                * If the new socket is user-owned, queue the chunk to the
+                * backlog of the new socket without dropping any refs.
+                * Otherwise, we can safely push the chunk on the inqueue.
+                */
+
+               sk = rcvr->sk;
+               sctp_bh_lock_sock(sk);
+
+               if (sock_owned_by_user(sk)) {
+                       sk_add_backlog(sk, skb);
+                       backloged = 1;
+               } else
+                       sctp_inq_push(inqueue, chunk);
+
+               sctp_bh_unlock_sock(sk);
+
+               /* If the chunk was backloged again, don't drop refs */
+               if (backloged)
+                       return 0;
+       } else {
+               sctp_inq_push(inqueue, chunk);
+       }
+
+done:
+       /* Release the refs we took in sctp_add_backlog */
+       if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+               sctp_association_put(sctp_assoc(rcvr));
+       else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+               sctp_endpoint_put(sctp_ep(rcvr));
+       else
+               BUG();
+
         return 0;
 }
 
-void sctp_backlog_migrate(struct sctp_association *assoc, 
-                         struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-       struct sk_buff *skb;
-       struct sctp_chunk *chunk;
+       struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+       struct sctp_ep_common *rcvr = chunk->rcvr;
 
-       skb = oldsk->sk_backlog.head;
-       oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
-       while (skb != NULL) {
-               struct sk_buff *next = skb->next;
-
-               chunk = SCTP_INPUT_CB(skb)->chunk;
-               skb->next = NULL;
-               if (&assoc->base == chunk->rcvr)
-                       sk_add_backlog(newsk, skb);
-               else
-                       sk_add_backlog(oldsk, skb);
-               skb = next;
-       }
+       /* Hold the assoc/ep while hanging on the backlog queue.
+        * This way, we know the structures we need will not disappear out
+        * from under us.
+        */
+       if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+               sctp_association_hold(sctp_assoc(rcvr));
+       else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+               sctp_endpoint_hold(sctp_ep(rcvr));
+       else
+               BUG();
+
+       sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
        union sctp_addr daddr;
        struct sctp_af *af;
        struct sock *sk = NULL;
-       struct sctp_association *asoc = NULL;
+       struct sctp_association *asoc;
        struct sctp_transport *transport = NULL;
 
        *app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
        return sk;
 
 out:
-       sock_put(sk);
        if (asoc)
                sctp_association_put(asoc);
        return NULL;
@@ -463,7 +485,6 @@ out:
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
        sctp_bh_unlock_sock(sk);
-       sock_put(sk);
        if (asoc)
                sctp_association_put(asoc);
 }
@@ -490,7 +511,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        int type = skb->h.icmph->type;
        int code = skb->h.icmph->code;
        struct sock *sk;
-       struct sctp_association *asoc;
+       struct sctp_association *asoc = NULL;
        struct sctp_transport *transport;
        struct inet_sock *inet;
        char *saveip, *savesctp;
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
 
 hit:
        sctp_endpoint_hold(ep);
-       sock_hold(epb->sk);
        read_unlock(&head->lock);
        return ep;
 }
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association(
 hit:
        *pt = transport;
        sctp_association_hold(asoc);
-       sock_hold(epb->sk);
        read_unlock(&head->lock);
        return asoc;
 }
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr,
        struct sctp_transport *transport;
 
        if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
-               sock_put(asoc->base.sk);
                sctp_association_put(asoc);
                return 1;
        }
index 8d1dc24bab4c1f1da541c760e13955ca54b77d29..c5beb2ad7ef7a442798a669faea2c393d5f422f8 100644 (file)
@@ -498,10 +498,6 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       /* Set sk_err to ECONNRESET on a 1-1 style socket. */
-       if (!sctp_style(asoc->base.sk, UDP))
-               asoc->base.sk->sk_err = ECONNRESET; 
-
        /* SEND_FAILED sent later when cleaning up the association. */
        asoc->outqueue.error = error;
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
        return;
 }
 
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+       struct sock *sk = asoc->base.sk;
+
+       if (!sctp_style(sk, UDP))
+               sk->sk_err = error;
+}
+
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1458,6 +1463,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        local_cork = 0;
                        asoc->peer.retran_path = t;
                        break;
+               case SCTP_CMD_SET_SK_ERR:
+                       sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+                       break;
                default:
                        printk(KERN_WARNING "Impossible command: %u, %p\n",
                               cmd->verb, cmd->obj.ptr);
index 8cdba51ec0766a4d333ddc30e7ba4b756fe7802f..8bc279219a72ca3b8e4e107eaeba562e87a79e4a 100644 (file)
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-                                          __u16 error,
+                                          __u16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport);
 
@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
        __u32 init_tag;
        struct sctp_chunk *err_chunk;
        struct sctp_packet *packet;
-       sctp_disposition_t ret;
+       __u16 error;
 
        if (!sctp_vtag_verify(chunk, asoc))
                return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                        goto nomem;
 
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-               sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-                               SCTP_STATE(SCTP_STATE_CLOSED));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
-               return SCTP_DISPOSITION_DELETE_TCB;
+               return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
+                                             ECONNREFUSED, asoc,
+                                             chunk->transport);
        }
 
        /* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
                                SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
-                               sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-                                               SCTP_STATE(SCTP_STATE_CLOSED));
-                               sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-                                               SCTP_NULL());
-                               return SCTP_DISPOSITION_CONSUME;
+                               error = SCTP_ERROR_INV_PARAM;
                        } else {
-                               sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-                                               SCTP_STATE(SCTP_STATE_CLOSED));
-                               sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-                                               SCTP_NULL());
-                               return SCTP_DISPOSITION_NOMEM;
+                               error = SCTP_ERROR_NO_RESOURCE;
                        }
                } else {
-                       ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
-                                                  commands);
-                       sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-                                       SCTP_STATE(SCTP_STATE_CLOSED));
-                       sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-                                       SCTP_NULL());
-                       return ret;
+                       sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+                       error = SCTP_ERROR_INV_PARAM;
                }
+               return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+                                               asoc, chunk->transport);
        }
 
        /* Tag the variable length parameters.  Note that we never
@@ -886,6 +873,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
        struct sctp_transport *transport = (struct sctp_transport *) arg;
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -1030,6 +1019,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
                                                  commands);
 
        hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+       /* Make sure that the length of the parameter is what we expect */
+       if (ntohs(hbinfo->param_hdr.length) !=
+                                   sizeof(sctp_sender_hb_info_t)) {
+               return SCTP_DISPOSITION_DISCARD;
+       }
+
        from_addr = hbinfo->daddr;
        link = sctp_assoc_lookup_paddr(asoc, &from_addr);
 
@@ -2126,6 +2121,8 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
        int attempts = asoc->init_err_counter + 1;
 
        if (attempts > asoc->max_init_attempts) {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                                SCTP_U32(SCTP_ERROR_STALE_COOKIE));
                return SCTP_DISPOSITION_DELETE_TCB;
@@ -2262,6 +2259,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
        /* ASSOC_FAILED will DELETE_TCB. */
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
        SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2306,7 +2304,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
        if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-       return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
+       return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+                                     chunk->transport);
 }
 
 /*
@@ -2318,7 +2317,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
                                        void *arg,
                                        sctp_cmd_seq_t *commands)
 {
-       return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
+       return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+                                     ENOPROTOOPT, asoc,
                                      (struct sctp_transport *)arg);
 }
 
@@ -2343,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-                                          __u16 error,
+                                          __u16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport)
 {
@@ -2353,6 +2353,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
        SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
        /* CMD_INIT_FAILED will DELETE_TCB. */
        sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                        SCTP_U32(error));
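The new sk_err argument lets the abort path surface a POSIX errno (ECONNREFUSED, ETIMEDOUT, ENOPROTOOPT above) on the socket before CMD_INIT_FAILED deletes the TCB; without it, a blocked connect() would wake with no error to report. The handler behind SCTP_CMD_SET_SK_ERR is not part of this hunk; a plausible sketch, assuming one-to-many (UDP-style) sockets must not inherit a single association's error:

#include <net/sctp/sctp.h>

/* Assumed shape of the SCTP_CMD_SET_SK_ERR handler; the real one
 * lives in the side-effect interpreter, outside this hunk.
 */
static void demo_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	/* A one-to-many socket carries many associations, so one
	 * association's failure is not propagated to the socket.
	 */
	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}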
@@ -3336,6 +3337,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
                sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_ASCONF_ACK));
                SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3362,6 +3365,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                 * processing the rest of the chunks in the packet.
                 */
                sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_ASCONF_ACK));
                SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3714,9 +3719,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
        if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ECONNREFUSED));
                sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                                SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
        } else {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
                SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4034,6 +4043,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
         * TCB.  This is a departure from our typical NOMEM handling.
         */
 
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                       SCTP_ERROR(ECONNABORTED));
        /* Delete the established association. */
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4175,6 +4186,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
         * TCB.  This is a departure from our typical NOMEM handling.
         */
 
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                       SCTP_ERROR(ECONNREFUSED));
        /* Delete the established association. */
        sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                        SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4543,6 +4556,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
        struct sctp_transport *transport = arg;
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4662,6 +4677,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
                SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
                                  " max_init_attempts: %d\n",
                                  attempts, asoc->max_init_attempts);
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
                return SCTP_DISPOSITION_DELETE_TCB;
@@ -4711,6 +4728,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
 
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
        } else {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
                return SCTP_DISPOSITION_DELETE_TCB;
@@ -4742,6 +4761,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
 
        SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
        if (asoc->overall_error_count >= asoc->max_retrans) {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                /* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4817,6 +4838,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
        if (asoc->overall_error_count >= asoc->max_retrans) {
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
                SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4870,6 +4893,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
                goto nomem;
 
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                       SCTP_ERROR(ETIMEDOUT));
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_U32(SCTP_ERROR_NO_ERROR));
 
@@ -5309,6 +5334,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
                 * processing the rest of the chunks in the packet.
                 */
                sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_DATA));
                SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
index b6e4b89539b3178216037c53350f5591dde8a1b6..174d4d35e951590d7bef5d10df113afe36f88ae6 100644
@@ -1057,6 +1057,7 @@ static int __sctp_connect(struct sock* sk,
        inet_sk(sk)->dport = htons(asoc->peer.port);
        af = sctp_get_af_specific(to.sa.sa_family);
        af->to_sk_daddr(&to, sk);
+       sk->sk_err = 0;
 
        timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
        err = sctp_wait_for_connect(asoc, &timeo);
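Clearing sk_err at the start of a connect attempt keeps a stale error from a previous failed association from leaking into this one. From userspace, the error, when one does occur, is read back through the standard SO_ERROR mechanism; a sketch using only the portable sockets API:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Report a deferred association failure the portable way. */
static void demo_report_sk_err(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
		fprintf(stderr, "association failed: %s\n", strerror(err));
}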
@@ -1228,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
        ep = sctp_sk(sk)->ep;
 
-       /* Walk all associations on a socket, not on an endpoint.  */
+       /* Walk all associations on an endpoint.  */
        list_for_each_safe(pos, temp, &ep->asocs) {
                asoc = list_entry(pos, struct sctp_association, asocs);
 
@@ -1241,13 +1242,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        if (sctp_state(asoc, CLOSED)) {
                                sctp_unhash_established(asoc);
                                sctp_association_free(asoc);
+                               continue;
+                       }
+               }
 
-                       } else if (sock_flag(sk, SOCK_LINGER) &&
-                                  !sk->sk_lingertime)
-                               sctp_primitive_ABORT(asoc, NULL);
-                       else
-                               sctp_primitive_SHUTDOWN(asoc, NULL);
-               } else
+               if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
+                       sctp_primitive_ABORT(asoc, NULL);
+               else
                        sctp_primitive_SHUTDOWN(asoc, NULL);
        }
 
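After the restructuring, the abort-or-shutdown decision is applied once per remaining association: SO_LINGER with a zero timeout aborts, everything else shuts down gracefully. Triggering the abortive path from userspace needs only the standard linger option, e.g.:

#include <sys/socket.h>
#include <unistd.h>

/* Force ABORT-on-close: linger enabled, zero timeout. */
static void demo_abortive_close(int fd)
{
	struct linger l = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
	close(fd);	/* ABORT, not a graceful SHUTDOWN */
}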
@@ -5317,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                 */
                sctp_release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
+               BUG_ON(sk != asoc->base.sk);
                sctp_lock_sock(sk);
 
                *timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         */
        newsp->type = type;
 
-       spin_lock_bh(&oldsk->sk_lock.slock);
-       /* Migrate the backlog from oldsk to newsk. */
-       sctp_backlog_migrate(assoc, oldsk, newsk);
-       /* Migrate the association to the new socket. */
+       /* Mark the new socket "in-use" by the user so that any packets
+        * that may arrive on the association after we've moved it are
+        * queued to the backlog.  This prevents a potential race between
+        * backlog processing on the old socket and new-packet processing
+        * on the new socket.
+        */
+       sctp_lock_sock(newsk);
        sctp_assoc_migrate(assoc, newsk);
-       spin_unlock_bh(&oldsk->sk_lock.slock);
 
        /* If the association on the newsk is already closed before accept()
         * is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
                newsk->sk_shutdown |= RCV_SHUTDOWN;
 
        newsk->sk_state = SCTP_SS_ESTABLISHED;
+       sctp_release_sock(newsk);
 }
 
 /* This proto struct describes the ULP interface for SCTP.  */
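The migration comment above relies on the usual socket-ownership contract: while lock_sock() marks the socket owned by the user, the softirq receive path must queue packets to the backlog instead of processing them, and release_sock() drains that backlog under the lock. The generic shape of that receive-side check (demo_rx and demo_process are illustrative stand-ins, not this file's code):

#include <net/sock.h>

static void demo_process(struct sock *sk, struct sk_buff *skb);	/* stand-in */

/* Generic owned-by-user check on the softirq receive path. */
static void demo_rx(struct sock *sk, struct sk_buff *skb)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		demo_process(sk, skb);
	else
		sk_add_backlog(sk, skb);	/* drained by release_sock() */
	bh_unlock_sock(sk);
}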
index 7177e98df7f33a4e06b521fbb2bb987150838246..c284dbb8b8c02fbba51aa562d5e4fd4551e641f3 100644
@@ -594,6 +594,10 @@ int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len)
 
                        *scontext_len = strlen(initial_sid_to_string[sid]) + 1;
                        scontextp = kmalloc(*scontext_len,GFP_ATOMIC);
+                       if (!scontextp) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
                        strcpy(scontextp, initial_sid_to_string[sid]);
                        *scontext = scontextp;
                        goto out;