err.no Git - linux-2.6/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
author		Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 23 Jul 2008 19:04:34 +0000 (12:04 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 23 Jul 2008 19:04:34 +0000 (12:04 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc:
  sdhci: highmem capable PIO routines
  sg: reimplement sg mapping iterator
  mmc_test: print message when attaching to card
  mmc: Remove Russell as primecell mci maintainer
  mmc_block: bounce buffer highmem support
  sdhci: fix bad warning from commit c8b3e02
  sdhci: add warnings for bad buffers in ADMA path
  mmc_test: test oversized sg lists
  mmc_test: highmem tests
  s3cmci: ensure host stopped on machine shutdown
  au1xmmc: suspend/resume implementation
  s3cmci: fixes for section mismatch warnings
  pxamci: trivial fix of DMA alignment register bit clearing

120 files changed:
arch/arm/Kconfig
arch/arm/kernel/Makefile
arch/arm/kernel/kgdb.c [new file with mode: 0644]
arch/arm/kernel/setup.c
arch/arm/kernel/traps.c
arch/avr32/mach-at32ap/at32ap700x.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/kgdb.c [new file with mode: 0644]
arch/powerpc/kernel/setup_32.c
arch/powerpc/platforms/powermac/setup.c
arch/um/include/init.h
crypto/async_tx/async_memcpy.c
crypto/async_tx/async_memset.c
crypto/async_tx/async_tx.c
crypto/async_tx/async_xor.c
drivers/char/nvram.c
drivers/char/tty_io.c
drivers/dca/dca-core.c
drivers/dca/dca-sysfs.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/dmaengine.c
drivers/dma/dmatest.c [new file with mode: 0644]
drivers/dma/dw_dmac.c [new file with mode: 0644]
drivers/dma/dw_dmac_regs.h [new file with mode: 0644]
drivers/dma/fsldma.c
drivers/dma/ioat.c
drivers/dma/ioat_dca.c
drivers/dma/ioat_dma.c
drivers/dma/ioatdma.h
drivers/dma/ioatdma_hw.h
drivers/dma/ioatdma_registers.h
drivers/dma/iop-adma.c
drivers/dma/mv_xor.c [new file with mode: 0644]
drivers/dma/mv_xor.h [new file with mode: 0644]
drivers/hid/hid-core.c
drivers/hid/hid-input-quirks.c
drivers/hid/hid-input.c
drivers/hid/hidraw.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hid/usbhid/usbkbd.c
drivers/hid/usbhid/usbmouse.c
drivers/ide/Kconfig
drivers/ide/arm/icside.c
drivers/ide/arm/ide_arm.c
drivers/ide/arm/palm_bk3710.c
drivers/ide/arm/rapide.c
drivers/ide/h8300/ide-h8300.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd.h
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-floppy.c
drivers/ide/ide-generic.c
drivers/ide/ide-io.c
drivers/ide/ide-iops.c
drivers/ide/ide-lib.c
drivers/ide/ide-pnp.c
drivers/ide/ide-probe.c
drivers/ide/ide-proc.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/ide.c
drivers/ide/legacy/buddha.c
drivers/ide/legacy/falconide.c
drivers/ide/legacy/gayle.c
drivers/ide/legacy/ide-4drives.c
drivers/ide/legacy/ide-cs.c
drivers/ide/legacy/ide_platform.c
drivers/ide/legacy/macide.c
drivers/ide/legacy/q40ide.c
drivers/ide/mips/au1xxx-ide.c
drivers/ide/mips/swarm.c
drivers/ide/pci/aec62xx.c
drivers/ide/pci/alim15x3.c
drivers/ide/pci/amd74xx.c
drivers/ide/pci/cmd640.c
drivers/ide/pci/cmd64x.c
drivers/ide/pci/cs5520.c
drivers/ide/pci/cs5535.c
drivers/ide/pci/delkin_cb.c
drivers/ide/pci/hpt34x.c
drivers/ide/pci/hpt366.c
drivers/ide/pci/ns87415.c
drivers/ide/pci/pdc202xx_old.c
drivers/ide/pci/piix.c
drivers/ide/pci/scc_pata.c
drivers/ide/pci/serverworks.c
drivers/ide/pci/sgiioc4.c
drivers/ide/pci/siimage.c
drivers/ide/pci/sl82c105.c
drivers/ide/pci/tc86c001.c
drivers/ide/pci/via82cxxx.c
drivers/ide/ppc/pmac.c
drivers/ide/setup-pci.c
drivers/scsi/ide-scsi.c
drivers/serial/cpm_uart/cpm_uart_core.c
drivers/serial/mpsc.c
include/asm-arm/arch-iop13xx/adma.h
include/asm-arm/hardware/iop3xx-adma.h
include/asm-arm/kgdb.h [new file with mode: 0644]
include/asm-arm/plat-orion/mv_xor.h [new file with mode: 0644]
include/asm-arm/traps.h
include/asm-avr32/arch-at32ap/at32ap700x.h
include/asm-powerpc/kgdb.h
include/linux/async_tx.h
include/linux/dca.h
include/linux/dmaengine.h
include/linux/dw_dmac.h [new file with mode: 0644]
include/linux/hid.h
include/linux/ide.h
include/linux/pci_ids.h
kernel/irq/manage.c
lib/Kconfig.kgdb
net/core/user_dma.c

index c7ad324ddf2cf407c89839f75487f8e27f9fa0cc..d048f6887d0b0aff8e882283923c9a3640a6a816 100644 (file)
@@ -12,6 +12,7 @@ config ARM
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
        select HAVE_OPROFILE
+       select HAVE_ARCH_KGDB
        select HAVE_KPROBES if (!XIP_KERNEL)
        select HAVE_KRETPROBES if (HAVE_KPROBES)
        select HAVE_FTRACE if (!XIP_KERNEL)
index eb9092ca80080cd4431951e46c6f2ce8daf70ab8..1d296fc8494e01f7f33a8cac97855506727f3e46 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_KPROBES)         += kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)       += atags.o
 obj-$(CONFIG_OABI_COMPAT)      += sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)      += thumbee.o
+obj-$(CONFIG_KGDB)             += kgdb.o
 
 obj-$(CONFIG_CRUNCH)           += crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o           := -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..aaffaec
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * arch/arm/kernel/kgdb.c
+ *
+ * ARM KGDB support
+ *
+ * Copyright (c) 2002-2004 MontaVista Software, Inc
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:  George Davis <davis_g@mvista.com>
+ *           Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/kgdb.h>
+#include <asm/traps.h>
+
+/* Make a local copy of the registers passed into the handler (bletch) */
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+       int regno;
+
+       /* Initialize all to zero. */
+       for (regno = 0; regno < GDB_MAX_REGS; regno++)
+               gdb_regs[regno] = 0;
+
+       gdb_regs[_R0]           = kernel_regs->ARM_r0;
+       gdb_regs[_R1]           = kernel_regs->ARM_r1;
+       gdb_regs[_R2]           = kernel_regs->ARM_r2;
+       gdb_regs[_R3]           = kernel_regs->ARM_r3;
+       gdb_regs[_R4]           = kernel_regs->ARM_r4;
+       gdb_regs[_R5]           = kernel_regs->ARM_r5;
+       gdb_regs[_R6]           = kernel_regs->ARM_r6;
+       gdb_regs[_R7]           = kernel_regs->ARM_r7;
+       gdb_regs[_R8]           = kernel_regs->ARM_r8;
+       gdb_regs[_R9]           = kernel_regs->ARM_r9;
+       gdb_regs[_R10]          = kernel_regs->ARM_r10;
+       gdb_regs[_FP]           = kernel_regs->ARM_fp;
+       gdb_regs[_IP]           = kernel_regs->ARM_ip;
+       gdb_regs[_SPT]          = kernel_regs->ARM_sp;
+       gdb_regs[_LR]           = kernel_regs->ARM_lr;
+       gdb_regs[_PC]           = kernel_regs->ARM_pc;
+       gdb_regs[_CPSR]         = kernel_regs->ARM_cpsr;
+}
+
+/* Copy local gdb registers back to kgdb regs, for later copy to kernel */
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+       kernel_regs->ARM_r0     = gdb_regs[_R0];
+       kernel_regs->ARM_r1     = gdb_regs[_R1];
+       kernel_regs->ARM_r2     = gdb_regs[_R2];
+       kernel_regs->ARM_r3     = gdb_regs[_R3];
+       kernel_regs->ARM_r4     = gdb_regs[_R4];
+       kernel_regs->ARM_r5     = gdb_regs[_R5];
+       kernel_regs->ARM_r6     = gdb_regs[_R6];
+       kernel_regs->ARM_r7     = gdb_regs[_R7];
+       kernel_regs->ARM_r8     = gdb_regs[_R8];
+       kernel_regs->ARM_r9     = gdb_regs[_R9];
+       kernel_regs->ARM_r10    = gdb_regs[_R10];
+       kernel_regs->ARM_fp     = gdb_regs[_FP];
+       kernel_regs->ARM_ip     = gdb_regs[_IP];
+       kernel_regs->ARM_sp     = gdb_regs[_SPT];
+       kernel_regs->ARM_lr     = gdb_regs[_LR];
+       kernel_regs->ARM_pc     = gdb_regs[_PC];
+       kernel_regs->ARM_cpsr   = gdb_regs[_CPSR];
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+       struct pt_regs *thread_regs;
+       int regno;
+
+       /* Just making sure... */
+       if (task == NULL)
+               return;
+
+       /* Initialize to zero */
+       for (regno = 0; regno < GDB_MAX_REGS; regno++)
+               gdb_regs[regno] = 0;
+
+       /* Otherwise, we have only some registers from switch_to() */
+       thread_regs             = task_pt_regs(task);
+       gdb_regs[_R0]           = thread_regs->ARM_r0;
+       gdb_regs[_R1]           = thread_regs->ARM_r1;
+       gdb_regs[_R2]           = thread_regs->ARM_r2;
+       gdb_regs[_R3]           = thread_regs->ARM_r3;
+       gdb_regs[_R4]           = thread_regs->ARM_r4;
+       gdb_regs[_R5]           = thread_regs->ARM_r5;
+       gdb_regs[_R6]           = thread_regs->ARM_r6;
+       gdb_regs[_R7]           = thread_regs->ARM_r7;
+       gdb_regs[_R8]           = thread_regs->ARM_r8;
+       gdb_regs[_R9]           = thread_regs->ARM_r9;
+       gdb_regs[_R10]          = thread_regs->ARM_r10;
+       gdb_regs[_FP]           = thread_regs->ARM_fp;
+       gdb_regs[_IP]           = thread_regs->ARM_ip;
+       gdb_regs[_SPT]          = thread_regs->ARM_sp;
+       gdb_regs[_LR]           = thread_regs->ARM_lr;
+       gdb_regs[_PC]           = thread_regs->ARM_pc;
+       gdb_regs[_CPSR]         = thread_regs->ARM_cpsr;
+}
+
+static int compiled_break;
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+                              int err_code, char *remcom_in_buffer,
+                              char *remcom_out_buffer,
+                              struct pt_regs *linux_regs)
+{
+       unsigned long addr;
+       char *ptr;
+
+       switch (remcom_in_buffer[0]) {
+       case 'D':
+       case 'k':
+       case 'c':
+               kgdb_contthread = NULL;
+
+               /*
+                * Try to read optional parameter, pc unchanged if no parm.
+                * If this was a compiled breakpoint, we need to move
+                * to the next instruction or we will just breakpoint
+                * over and over again.
+                */
+               ptr = &remcom_in_buffer[1];
+               if (kgdb_hex2long(&ptr, &addr))
+                       linux_regs->ARM_pc = addr;
+               else if (compiled_break == 1)
+                       linux_regs->ARM_pc += 4;
+
+               compiled_break = 0;
+
+               return 0;
+       }
+
+       return -1;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
+{
+       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+       return 0;
+}
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
+{
+       compiled_break = 1;
+       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+       return 0;
+}
+
+static struct undef_hook kgdb_brkpt_hook = {
+       .instr_mask             = 0xffffffff,
+       .instr_val              = KGDB_BREAKINST,
+       .fn                     = kgdb_brk_fn
+};
+
+static struct undef_hook kgdb_compiled_brkpt_hook = {
+       .instr_mask             = 0xffffffff,
+       .instr_val              = KGDB_COMPILED_BREAK,
+       .fn                     = kgdb_compiled_brk_fn
+};
+
+/**
+ *     kgdb_arch_init - Perform any architecture specific initialization.
+ *
+ *     This function will handle the initialization of any architecture
+ *     specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+       register_undef_hook(&kgdb_brkpt_hook);
+       register_undef_hook(&kgdb_compiled_brkpt_hook);
+
+       return 0;
+}
+
+/**
+ *     kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ *     This function will handle the uninitialization of any architecture
+ *     specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+       unregister_undef_hook(&kgdb_brkpt_hook);
+       unregister_undef_hook(&kgdb_compiled_brkpt_hook);
+}
+
+/*
+ * Register our undef instruction hooks with ARM undef core.
+ * We register a hook specifically looking for the KGDB break instruction
+ * and we handle the normal undef case within the do_undefinstr
+ * handler.
+ */
+struct kgdb_arch arch_kgdb_ops = {
+#ifndef __ARMEB__
+       .gdb_bpt_instr          = {0xfe, 0xde, 0xff, 0xe7}
+#else /* ! __ARMEB__ */
+       .gdb_bpt_instr          = {0xe7, 0xff, 0xde, 0xfe}
+#endif
+};
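The two byte arrays above spell out the same 32-bit undefined instruction, 0xe7ffdefe, in memory order for little- and big-endian kernels respectively. A minimal user-space sketch of that relationship (the opcode value is inferred from the arrays above, not quoted from asm-arm/kgdb.h):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t opcode = 0xe7ffdefe;	/* assumed KGDB break opcode */
	unsigned char bpt[4];

	memcpy(bpt, &opcode, sizeof(bpt));	/* memory order on this host */
	printf("%02x %02x %02x %02x\n", bpt[0], bpt[1], bpt[2], bpt[3]);
	/* little-endian: fe de ff e7 -- big-endian (__ARMEB__): e7 ff de fe */
	return 0;
}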
index b7b0720bc1bbe8295ea3ca7b2849e4a5ad875bf2..38f0e7940a132b1c9e0458bc10e9d1dc3f2401ee 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
+#include <asm/traps.h>
 
 #include "compat.h"
 #include "atags.h"
@@ -853,6 +854,7 @@ void __init setup_arch(char **cmdline_p)
        conswitchp = &dummy_con;
 #endif
 #endif
+       early_trap_init();
 }
 
 
index 5595fdd75e8200ec0e8d5ea8a54c750ed0398e1f..7277aef8309836fb74b6e2368beabf050757af9d 100644 (file)
@@ -707,6 +707,11 @@ void abort(void)
 EXPORT_SYMBOL(abort);
 
 void __init trap_init(void)
+{
+       return;
+}
+
+void __init early_trap_init(void)
 {
        unsigned long vectors = CONFIG_VECTORS_BASE;
        extern char __stubs_start[], __stubs_end[];
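The two ARM hunks above split vector installation out of trap_init() so it can run from setup_arch(); a rough sketch of the resulting boot order, reconstructed from this diff rather than quoted from the full functions:

/* After this patch, early in boot:
 *
 *   start_kernel()
 *     setup_arch()
 *       early_trap_init()   -- copies the vectors and stubs to
 *                              CONFIG_VECTORS_BASE, so the KGDB
 *                              breakpoint hooks registered by
 *                              kgdb_arch_init() can fire before
 *                              trap_init() would have run
 *     ...
 *     trap_init()           -- now an empty stub
 */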
index 021d5121718469387fc1c4d95dd797a12f40bb3b..604f44f5dd164833a9bfdb075a7d65a713617fff 100644 (file)
@@ -7,6 +7,7 @@
  */
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dw_dmac.h>
 #include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
@@ -594,6 +595,17 @@ static void __init genclk_init_parent(struct clk *clk)
        clk->parent = parent;
 }
 
+static struct dw_dma_platform_data dw_dmac0_data = {
+       .nr_channels    = 3,
+};
+
+static struct resource dw_dmac0_resource[] = {
+       PBMEM(0xff200000),
+       IRQ(2),
+};
+DEFINE_DEV_DATA(dw_dmac, 0);
+DEV_CLK(hclk, dw_dmac0, hsb, 10);
+
 /* --------------------------------------------------------------------
  *  System peripherals
  * -------------------------------------------------------------------- */
@@ -708,17 +720,6 @@ static struct clk pico_clk = {
        .users          = 1,
 };
 
-static struct resource dmaca0_resource[] = {
-       {
-               .start  = 0xff200000,
-               .end    = 0xff20ffff,
-               .flags  = IORESOURCE_MEM,
-       },
-       IRQ(2),
-};
-DEFINE_DEV(dmaca, 0);
-DEV_CLK(hclk, dmaca0, hsb, 10);
-
 /* --------------------------------------------------------------------
  * HMATRIX
  * -------------------------------------------------------------------- */
@@ -831,7 +832,7 @@ void __init at32_add_system_devices(void)
        platform_device_register(&at32_eic0_device);
        platform_device_register(&smc0_device);
        platform_device_register(&pdc_device);
-       platform_device_register(&dmaca0_device);
+       platform_device_register(&dw_dmac0_device);
 
        platform_device_register(&at32_tcb0_device);
        platform_device_register(&at32_tcb1_device);
@@ -2032,7 +2033,7 @@ struct clk *at32_clock_list[] = {
        &smc0_mck,
        &pdc_hclk,
        &pdc_pclk,
-       &dmaca0_hclk,
+       &dw_dmac0_hclk,
        &pico_clk,
        &pio0_mck,
        &pio1_mck,
index 4c22242b396f7f49dd86a02b2da56becc82fd3ff..737ebf9d12bb9aca170de68098c758f796d46b0f 100644 (file)
@@ -112,6 +112,7 @@ config PPC
        select HAVE_FTRACE
        select HAVE_IDE
        select HAVE_KPROBES
+       select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
        select HAVE_LMB
        select HAVE_DMA_ATTRS if PPC64
index 2840ab69ef4ec73c28be7dc6acfcea9c2b5c020e..8c8aadbe9563ef14d3671929f2b6c17210e7c354 100644 (file)
@@ -41,22 +41,6 @@ config HCALL_STATS
          This option will add a small amount of overhead to all hypervisor
          calls.
 
-config DEBUGGER
-       bool "Enable debugger hooks"
-       depends on DEBUG_KERNEL
-       help
-         Include in-kernel hooks for kernel debuggers. Unless you are
-         intending to debug the kernel, say N here.
-
-config KGDB
-       bool "Include kgdb kernel debugger"
-       depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx)
-       select DEBUG_INFO
-       help
-         Include in-kernel hooks for kgdb, the Linux kernel source level
-         debugger.  See <http://kgdb.sourceforge.net/> for more information.
-         Unless you are intending to debug the kernel, say N here.
-
 config CODE_PATCHING_SELFTEST
        bool "Run self-tests of the code-patching code."
        depends on DEBUG_KERNEL
@@ -67,36 +51,9 @@ config FTR_FIXUP_SELFTEST
        depends on DEBUG_KERNEL
        default n
 
-choice
-       prompt "Serial Port"
-       depends on KGDB
-       default KGDB_TTYS1
-
-config KGDB_TTYS0
-       bool "ttyS0"
-
-config KGDB_TTYS1
-       bool "ttyS1"
-
-config KGDB_TTYS2
-       bool "ttyS2"
-
-config KGDB_TTYS3
-       bool "ttyS3"
-
-endchoice
-
-config KGDB_CONSOLE
-       bool "Enable serial console thru kgdb port"
-       depends on KGDB && 8xx || CPM2
-       help
-         If you enable this, all serial console messages will be sent
-         over the gdb stub.
-         If unsure, say N.
-
 config XMON
        bool "Include xmon kernel debugger"
-       depends on DEBUGGER
+       depends on DEBUG_KERNEL
        help
          Include in-kernel hooks for the xmon kernel monitor/debugger.
          Unless you are intending to debug the kernel, say N here.
@@ -126,6 +83,11 @@ config XMON_DISASSEMBLY
          to say Y here, unless you're building for a memory-constrained
          system.
 
+config DEBUGGER
+       bool
+       depends on KGDB || XMON
+       default y
+
 config IRQSTACKS
        bool "Use separate kernel stacks when processing interrupts"
        help
index bf0b1fd0ec3470080de306d9b47c56db9a034650..1a4094704b1fc630386041239241008c5d457487 100644 (file)
@@ -74,6 +74,7 @@ obj-y                         += time.o prom.o traps.o setup-common.o \
                                   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)            += entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)            += dma_64.o iommu.o
+obj-$(CONFIG_KGDB)             += kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)        += prom_init.o
 obj-$(CONFIG_MODULES)          += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT)       += btext.o
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..b4fdf2f
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * PowerPC backend to the KGDB stub.
+ *
+ * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
+ * Copyright (C) 2003 Timesys Corporation.
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
+ * Sergei Shtylyov <sshtylyov@ru.mvista.com>
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+
+/*
+ * This table contains the mapping between PowerPC hardware trap types, and
+ * signals, which are primarily what GDB understands.  GDB and the kernel
+ * don't always agree on values, so we use constants taken from gdb-6.2.
+ */
+static struct hard_trap_info
+{
+       unsigned int tt;                /* Trap type code for powerpc */
+       unsigned char signo;            /* Signal that we map this trap into */
+} hard_trap_info[] = {
+       { 0x0100, 0x02 /* SIGINT */  },         /* system reset */
+       { 0x0200, 0x0b /* SIGSEGV */ },         /* machine check */
+       { 0x0300, 0x0b /* SIGSEGV */ },         /* data access */
+       { 0x0400, 0x0b /* SIGSEGV */ },         /* instruction access */
+       { 0x0500, 0x02 /* SIGINT */  },         /* external interrupt */
+       { 0x0600, 0x0a /* SIGBUS */  },         /* alignment */
+       { 0x0700, 0x05 /* SIGTRAP */ },         /* program check */
+       { 0x0800, 0x08 /* SIGFPE */  },         /* fp unavailable */
+       { 0x0900, 0x0e /* SIGALRM */ },         /* decrementer */
+       { 0x0c00, 0x14 /* SIGCHLD */ },         /* system call */
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+       { 0x2002, 0x05 /* SIGTRAP */ },         /* debug */
+#if defined(CONFIG_FSL_BOOKE)
+       { 0x2010, 0x08 /* SIGFPE */  },         /* spe unavailable */
+       { 0x2020, 0x08 /* SIGFPE */  },         /* spe unavailable */
+       { 0x2030, 0x08 /* SIGFPE */  },         /* spe fp data */
+       { 0x2040, 0x08 /* SIGFPE */  },         /* spe fp data */
+       { 0x2050, 0x08 /* SIGFPE */  },         /* spe fp round */
+       { 0x2060, 0x0e /* SIGILL */  },         /* performance monitor */
+       { 0x2900, 0x08 /* SIGFPE */  },         /* apu unavailable */
+       { 0x3100, 0x0e /* SIGALRM */ },         /* fixed interval timer */
+       { 0x3200, 0x02 /* SIGINT */  },         /* watchdog */
+#else /* ! CONFIG_FSL_BOOKE */
+       { 0x1000, 0x0e /* SIGALRM */ },         /* prog interval timer */
+       { 0x1010, 0x0e /* SIGALRM */ },         /* fixed interval timer */
+       { 0x1020, 0x02 /* SIGINT */  },         /* watchdog */
+       { 0x2010, 0x08 /* SIGFPE */  },         /* fp unavailable */
+       { 0x2020, 0x08 /* SIGFPE */  },         /* ap unavailable */
+#endif
+#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
+       { 0x0d00, 0x05 /* SIGTRAP */ },         /* single-step */
+#if defined(CONFIG_8xx)
+       { 0x1000, 0x04 /* SIGILL */  },         /* software emulation */
+#else /* ! CONFIG_8xx */
+       { 0x0f00, 0x04 /* SIGILL */  },         /* performance monitor */
+       { 0x0f20, 0x08 /* SIGFPE */  },         /* altivec unavailable */
+       { 0x1300, 0x05 /* SIGTRAP */ },         /* instruction address break */
+#if defined(CONFIG_PPC64)
+       { 0x1200, 0x05 /* SIGILL */  },         /* system error */
+       { 0x1500, 0x04 /* SIGILL */  },         /* soft patch */
+       { 0x1600, 0x04 /* SIGILL */  },         /* maintenance */
+       { 0x1700, 0x08 /* SIGFPE */  },         /* altivec assist */
+       { 0x1800, 0x04 /* SIGILL */  },         /* thermal */
+#else /* ! CONFIG_PPC64 */
+       { 0x1400, 0x02 /* SIGINT */  },         /* SMI */
+       { 0x1600, 0x08 /* SIGFPE */  },         /* altivec assist */
+       { 0x1700, 0x04 /* SIGILL */  },         /* TAU */
+       { 0x2000, 0x05 /* SIGTRAP */ },         /* run mode */
+#endif
+#endif
+#endif
+       { 0x0000, 0x00 }                        /* Must be last */
+};
+
+static int computeSignal(unsigned int tt)
+{
+       struct hard_trap_info *ht;
+
+       for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+               if (ht->tt == tt)
+                       return ht->signo;
+
+       return SIGHUP;          /* default for things we don't know about */
+}
+
+static int kgdb_call_nmi_hook(struct pt_regs *regs)
+{
+       kgdb_nmicallback(raw_smp_processor_id(), regs);
+       return 0;
+}
+
+#ifdef CONFIG_SMP
+void kgdb_roundup_cpus(unsigned long flags)
+{
+       smp_send_debugger_break(MSG_ALL_BUT_SELF);
+}
+#endif
+
+/* KGDB functions to use existing PowerPC64 hooks. */
+static int kgdb_debugger(struct pt_regs *regs)
+{
+       return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+}
+
+static int kgdb_handle_breakpoint(struct pt_regs *regs)
+{
+       if (user_mode(regs))
+               return 0;
+
+       if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+               return 0;
+
+       if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+               regs->nip += 4;
+
+       return 1;
+}
+
+static int kgdb_singlestep(struct pt_regs *regs)
+{
+       struct thread_info *thread_info, *exception_thread_info;
+
+       if (user_mode(regs))
+               return 0;
+
+       /*
+        * On Book E and perhaps other processors, singlestep is handled on
+        * the critical exception stack.  This causes current_thread_info()
+        * to fail, since it locates the thread_info by masking off
+        * the low bits of the current stack pointer.  We work around
+        * this issue by copying the thread_info from the kernel stack
+        * before calling kgdb_handle_exception, and copying it back
+        * afterwards.  On most processors the copy is avoided since
+        * exception_thread_info == thread_info.
+        */
+       thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
+       exception_thread_info = current_thread_info();
+
+       if (thread_info != exception_thread_info)
+               memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+
+       kgdb_handle_exception(0, SIGTRAP, 0, regs);
+
+       if (thread_info != exception_thread_info)
+               memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+
+       return 1;
+}
+
+static int kgdb_iabr_match(struct pt_regs *regs)
+{
+       if (user_mode(regs))
+               return 0;
+
+       if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+               return 0;
+       return 1;
+}
+
+static int kgdb_dabr_match(struct pt_regs *regs)
+{
+       if (user_mode(regs))
+               return 0;
+
+       if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+               return 0;
+       return 1;
+}
+
+#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
+
+#define PACK32(ptr, src) do {          \
+       u32 *ptr32;                   \
+       ptr32 = (u32 *)ptr;           \
+       *(ptr32++) = (src);           \
+       ptr = (unsigned long *)ptr32; \
+       } while (0)
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+       unsigned long *ptr = gdb_regs;
+       int reg;
+
+       memset(gdb_regs, 0, NUMREGBYTES);
+
+       for (reg = 0; reg < 32; reg++)
+               PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+       for (reg = 0; reg < 32; reg++)
+               PACK64(ptr, current->thread.evr[reg]);
+#else
+       ptr += 32;
+#endif
+#else
+       /* fp registers not used by kernel, leave zero */
+       ptr += 32 * 8 / sizeof(long);
+#endif
+
+       PACK64(ptr, regs->nip);
+       PACK64(ptr, regs->msr);
+       PACK32(ptr, regs->ccr);
+       PACK64(ptr, regs->link);
+       PACK64(ptr, regs->ctr);
+       PACK32(ptr, regs->xer);
+
+       BUG_ON((unsigned long)ptr >
+              (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+       struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
+                                                 STACK_FRAME_OVERHEAD);
+       unsigned long *ptr = gdb_regs;
+       int reg;
+
+       memset(gdb_regs, 0, NUMREGBYTES);
+
+       /* Regs GPR0-2 */
+       for (reg = 0; reg < 3; reg++)
+               PACK64(ptr, regs->gpr[reg]);
+
+       /* Regs GPR3-13 are caller saved, not in regs->gpr[] */
+       ptr += 11;
+
+       /* Regs GPR14-31 */
+       for (reg = 14; reg < 32; reg++)
+               PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+       for (reg = 0; reg < 32; reg++)
+               PACK64(ptr, p->thread.evr[reg]);
+#else
+       ptr += 32;
+#endif
+#else
+       /* fp registers not used by kernel, leave zero */
+       ptr += 32 * 8 / sizeof(long);
+#endif
+
+       PACK64(ptr, regs->nip);
+       PACK64(ptr, regs->msr);
+       PACK32(ptr, regs->ccr);
+       PACK64(ptr, regs->link);
+       PACK64(ptr, regs->ctr);
+       PACK32(ptr, regs->xer);
+
+       BUG_ON((unsigned long)ptr >
+              (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
+
+#define UNPACK32(dest, ptr) do {       \
+       u32 *ptr32;                   \
+       ptr32 = (u32 *)ptr;           \
+       dest = *(ptr32++);            \
+       ptr = (unsigned long *)ptr32; \
+       } while (0)
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+       unsigned long *ptr = gdb_regs;
+       int reg;
+#ifdef CONFIG_SPE
+       union {
+               u32 v32[2];
+               u64 v64;
+       } acc;
+#endif
+
+       for (reg = 0; reg < 32; reg++)
+               UNPACK64(regs->gpr[reg], ptr);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+       for (reg = 0; reg < 32; reg++)
+               UNPACK64(current->thread.evr[reg], ptr);
+#else
+       ptr += 32;
+#endif
+#else
+       /* fp registers not used by kernel, leave zero */
+       ptr += 32 * 8 / sizeof(int);
+#endif
+
+       UNPACK64(regs->nip, ptr);
+       UNPACK64(regs->msr, ptr);
+       UNPACK32(regs->ccr, ptr);
+       UNPACK64(regs->link, ptr);
+       UNPACK64(regs->ctr, ptr);
+       UNPACK32(regs->xer, ptr);
+
+       BUG_ON((unsigned long)ptr >
+              (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+/*
+ * This function does PowerPC-specific processing for interfacing to gdb.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+                              char *remcom_in_buffer, char *remcom_out_buffer,
+                              struct pt_regs *linux_regs)
+{
+       char *ptr = &remcom_in_buffer[1];
+       unsigned long addr;
+
+       switch (remcom_in_buffer[0]) {
+               /*
+                * sAA..AA   Step one instruction from AA..AA
+                * This will return an error to gdb ..
+                */
+       case 's':
+       case 'c':
+               /* handle the optional parameter */
+               if (kgdb_hex2long(&ptr, &addr))
+                       linux_regs->nip = addr;
+
+               atomic_set(&kgdb_cpu_doing_single_step, -1);
+               /* set the trace bit if we're stepping */
+               if (remcom_in_buffer[0] == 's') {
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+                       mtspr(SPRN_DBCR0,
+                             mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+                       linux_regs->msr |= MSR_DE;
+#else
+                       linux_regs->msr |= MSR_SE;
+#endif
+                       kgdb_single_step = 1;
+                       if (kgdb_contthread)
+                               atomic_set(&kgdb_cpu_doing_single_step,
+                                          raw_smp_processor_id());
+               }
+               return 0;
+       }
+
+       return -1;
+}
+
+/*
+ * Global data
+ */
+struct kgdb_arch arch_kgdb_ops = {
+       .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+};
+
+static int kgdb_not_implemented(struct pt_regs *regs)
+{
+       return 0;
+}
+
+static void *old__debugger_ipi;
+static void *old__debugger;
+static void *old__debugger_bpt;
+static void *old__debugger_sstep;
+static void *old__debugger_iabr_match;
+static void *old__debugger_dabr_match;
+static void *old__debugger_fault_handler;
+
+int kgdb_arch_init(void)
+{
+       old__debugger_ipi = __debugger_ipi;
+       old__debugger = __debugger;
+       old__debugger_bpt = __debugger_bpt;
+       old__debugger_sstep = __debugger_sstep;
+       old__debugger_iabr_match = __debugger_iabr_match;
+       old__debugger_dabr_match = __debugger_dabr_match;
+       old__debugger_fault_handler = __debugger_fault_handler;
+
+       __debugger_ipi = kgdb_call_nmi_hook;
+       __debugger = kgdb_debugger;
+       __debugger_bpt = kgdb_handle_breakpoint;
+       __debugger_sstep = kgdb_singlestep;
+       __debugger_iabr_match = kgdb_iabr_match;
+       __debugger_dabr_match = kgdb_dabr_match;
+       __debugger_fault_handler = kgdb_not_implemented;
+
+       return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+       __debugger_ipi = old__debugger_ipi;
+       __debugger = old__debugger;
+       __debugger_bpt = old__debugger_bpt;
+       __debugger_sstep = old__debugger_sstep;
+       __debugger_iabr_match = old__debugger_iabr_match;
+       __debugger_dabr_match = old__debugger_dabr_match;
+       __debugger_fault_handler = old__debugger_fault_handler;
+}
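The PACK64/PACK32 pair above walks one cursor across a buffer that mixes 64- and 32-bit fields, since ccr and xer occupy only 32 bits in gdb's register layout. A standalone sketch of the cursor arithmetic (example values only, assumes an LP64 host):

#include <stdio.h>
#include <stdint.h>

#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
#define PACK32(ptr, src) do {			\
	uint32_t *ptr32 = (uint32_t *)ptr;	\
	*(ptr32++) = (src);			\
	ptr = (unsigned long *)ptr32;		\
	} while (0)

int main(void)
{
	unsigned long buf[4] = {0}, *p = buf;

	PACK64(p, 0x1111111111111111UL);	/* e.g. nip: 8 bytes */
	PACK32(p, 0x22222222U);			/* e.g. ccr: only 4 bytes */
	PACK64(p, 0x3333333333333333UL);	/* e.g. link: lands on a
						 * 4-byte boundary, exactly
						 * as in the kernel layout */
	printf("used %td bytes\n", (char *)p - (char *)buf);	/* 20 */
	return 0;
}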
index 4efebe88e64a9607c98281a3f8489430e35495df..066e65c59b58230091db2145e1e4115abe541b84 100644 (file)
 
 #define DBG(fmt...)
 
-#if defined CONFIG_KGDB
-#include <asm/kgdb.h>
-#endif
-
 extern void bootx_init(unsigned long r4, unsigned long phys);
 
 int boot_cpuid;
@@ -302,18 +298,6 @@ void __init setup_arch(char **cmdline_p)
 
        xmon_setup();
 
-#if defined(CONFIG_KGDB)
-       if (ppc_md.kgdb_map_scc)
-               ppc_md.kgdb_map_scc();
-       set_debug_traps();
-       if (strstr(cmd_line, "gdb")) {
-               if (ppc_md.progress)
-                       ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
-               printk("kgdb breakpoint activated\n");
-               breakpoint();
-       }
-#endif
-
        /*
         * Set cache line size based on type of cpu as a default.
         * Systems with OF can look in the properties on the cpu node(s)
index 00bd0166d07fd78261a4230bbcb30555f313a356..31635446901a95bbbab17a69543912c95f79ab5e 100644 (file)
@@ -97,8 +97,6 @@ extern struct machdep_calls pmac_md;
 int sccdbg;
 #endif
 
-extern void zs_kgdb_hook(int tty_num);
-
 sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
 EXPORT_SYMBOL(sys_ctrler);
 
@@ -329,10 +327,6 @@ static void __init pmac_setup_arch(void)
        l2cr_init();
 #endif /* CONFIG_PPC32 */
 
-#ifdef CONFIG_KGDB
-       zs_kgdb_hook(0);
-#endif
-
        find_via_cuda();
        find_via_pmu();
        smu_init();
index b00a95741d4115339db192cddf54a7ba1b977ab7..37dd097c16c07131282b8ddd484d52e1cb6fd369 100644 (file)
@@ -45,12 +45,20 @@ typedef void (*exitcall_t)(void);
 # define __section(S) __attribute__ ((__section__(#S)))
 #endif
 
+#if __GNUC__ == 3
+
 #if __GNUC_MINOR__ >= 3
 # define __used                        __attribute__((__used__))
 #else
 # define __used                        __attribute__((__unused__))
 #endif
 
+#else
+#if __GNUC__ == 4
+# define __used                        __attribute__((__used__))
+#endif
+#endif
+
 #else
 #include <linux/compiler.h>
 #endif
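This keeps the meaning of __used consistent across gcc 3.3+ and gcc 4: the attribute tells the compiler a static symbol is referenced even when it cannot see the reference. A small illustration (the symbol is hypothetical, not from the patch):

#include <stdio.h>

#define __used __attribute__((__used__))

/* Kept in the object file although nothing in this translation unit
 * references it; initcall-style tables walked via linker sections
 * depend on this to survive unused-static elimination. */
static __used int keep_me = 42;

int main(void)
{
	puts("keep_me is retained because of __used");
	return 0;
}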
index a5eda80e84277806b4cbb49c32d55dad233df381..ddccfb01c416b9a636324ee273244b147c644a16 100644 (file)
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
                pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
                /* wait for any prerequisite operations */
-               if (depend_tx) {
-                       /* if ack is already set then we cannot be sure
-                        * we are referring to the correct operation
-                        */
-                       BUG_ON(async_tx_test_ack(depend_tx));
-                       if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-                               panic("%s: DMA_ERROR waiting for depend_tx\n",
-                                       __func__);
-               }
+               async_tx_quiesce(&depend_tx);
 
                dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
                src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
                kunmap_atomic(dest_buf, KM_USER0);
                kunmap_atomic(src_buf, KM_USER1);
 
-               async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+               async_tx_sync_epilog(cb_fn, cb_param);
        }
 
        return tx;
index f5ff3906b035d52d5c6b28949ca6470b46d60f88..5b5eb99bb244311bb1456c62080249d812967be4 100644 (file)
@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
                dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
                /* wait for any prerequisite operations */
-               if (depend_tx) {
-                       /* if ack is already set then we cannot be sure
-                        * we are referring to the correct operation
-                        */
-                       BUG_ON(depend_tx->ack);
-                       if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-                               panic("%s: DMA_ERROR waiting for depend_tx\n",
-                                       __func__);
-               }
+               async_tx_quiesce(&depend_tx);
 
                memset(dest_buf, val, len);
 
-               async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+               async_tx_sync_epilog(cb_fn, cb_param);
        }
 
        return tx;
index 095c798d31700746d8bd6769e3f5f8d015d7e264..85eaf7b1c53153ef07bdc33efa3a9d9c6cdff839 100644 (file)
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
        case DMA_RESOURCE_REMOVED:
                found = 0;
                spin_lock_irqsave(&async_tx_lock, flags);
-               list_for_each_entry_rcu(ref, &async_tx_master_list, node)
+               list_for_each_entry(ref, &async_tx_master_list, node)
                        if (ref->chan == chan) {
                                /* permit backing devices to go away */
                                dma_chan_put(ref->chan);
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
                pr_debug("%s: (sync)\n", __func__);
 
                /* wait for any prerequisite operations */
-               if (depend_tx) {
-                       /* if ack is already set then we cannot be sure
-                        * we are referring to the correct operation
-                        */
-                       BUG_ON(async_tx_test_ack(depend_tx));
-                       if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-                               panic("%s: DMA_ERROR waiting for depend_tx\n",
-                                       __func__);
-               }
+               async_tx_quiesce(&depend_tx);
 
-               async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+               async_tx_sync_epilog(cb_fn, cb_param);
        }
 
        return tx;
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+       if (*tx) {
+               /* if ack is already set then we cannot be sure
+                * we are referring to the correct operation
+                */
+               BUG_ON(async_tx_test_ack(*tx));
+               if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+                       panic("DMA_ERROR waiting for transaction\n");
+               async_tx_ack(*tx);
+               *tx = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
 module_init(async_tx_init);
 module_exit(async_tx_exit);
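async_tx_quiesce() folds the wait-for-dependency pattern deleted from the callers above into one helper; because it NULLs the pointer after acking, a second call on the same variable is a harmless no-op. A caller-side sketch (the descriptor variable is illustrative only):

/* dep points at some earlier transaction this operation depends on */
struct dma_async_tx_descriptor *dep = prior_tx;	/* illustrative */

async_tx_quiesce(&dep);	/* waits for completion, acks, sets dep = NULL */
async_tx_quiesce(&dep);	/* no-op: dep is already NULL */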
 
index 3a0dddca5a1097e473d103a15797547da66e7bf1..65974c6d3d7a78dbc40e9437b416d5990e04e9a2 100644 (file)
  *     when CONFIG_DMA_ENGINE=n
  */
 static __always_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_device *device,
-       struct dma_chan *chan, struct page *dest, struct page **src_list,
-       unsigned int offset, unsigned int src_cnt, size_t len,
-       enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-       dma_async_tx_callback cb_fn, void *cb_param)
+do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
+            unsigned int offset, int src_cnt, size_t len,
+            enum async_tx_flags flags,
+            struct dma_async_tx_descriptor *depend_tx,
+            dma_async_tx_callback cb_fn, void *cb_param)
 {
-       dma_addr_t dma_dest;
+       struct dma_device *dma = chan->device;
        dma_addr_t *dma_src = (dma_addr_t *) src_list;
-       struct dma_async_tx_descriptor *tx;
+       struct dma_async_tx_descriptor *tx = NULL;
+       int src_off = 0;
        int i;
-       unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
-
-       pr_debug("%s: len: %zu\n", __func__, len);
-
-       dma_dest = dma_map_page(device->dev, dest, offset, len,
-                               DMA_FROM_DEVICE);
+       dma_async_tx_callback _cb_fn;
+       void *_cb_param;
+       enum async_tx_flags async_flags;
+       enum dma_ctrl_flags dma_flags;
+       int xor_src_cnt;
+       dma_addr_t dma_dest;
 
+       dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
        for (i = 0; i < src_cnt; i++)
-               dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+               dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
                                          len, DMA_TO_DEVICE);
 
-       /* Since we have clobbered the src_list we are committed
-        * to doing this asynchronously.  Drivers force forward progress
-        * in case they can not provide a descriptor
-        */
-       tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
-                                        dma_prep_flags);
-       if (!tx) {
-               if (depend_tx)
-                       dma_wait_for_async_tx(depend_tx);
-
-               while (!tx)
-                       tx = device->device_prep_dma_xor(chan, dma_dest,
-                                                        dma_src, src_cnt, len,
-                                                        dma_prep_flags);
-       }
+       while (src_cnt) {
+               async_flags = flags;
+               dma_flags = 0;
+               xor_src_cnt = min(src_cnt, dma->max_xor);
+               /* if we are submitting additional xors, leave the chain open,
+                * clear the callback parameters, and leave the destination
+                * buffer mapped
+                */
+               if (src_cnt > xor_src_cnt) {
+                       async_flags &= ~ASYNC_TX_ACK;
+                       dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
+                       _cb_fn = NULL;
+                       _cb_param = NULL;
+               } else {
+                       _cb_fn = cb_fn;
+                       _cb_param = cb_param;
+               }
+               if (_cb_fn)
+                       dma_flags |= DMA_PREP_INTERRUPT;
 
-       async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+               /* Since we have clobbered the src_list we are committed
+                * to doing this asynchronously.  Drivers force forward progress
+                * in case they can not provide a descriptor
+                */
+               tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
+                                             xor_src_cnt, len, dma_flags);
+
+               if (unlikely(!tx))
+                       async_tx_quiesce(&depend_tx);
+
+               /* spin wait for the preceding transactions to complete */
+               while (unlikely(!tx)) {
+                       dma_async_issue_pending(chan);
+                       tx = dma->device_prep_dma_xor(chan, dma_dest,
+                                                     &dma_src[src_off],
+                                                     xor_src_cnt, len,
+                                                     dma_flags);
+               }
+
+               async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
+                               _cb_param);
+
+               depend_tx = tx;
+               flags |= ASYNC_TX_DEP_ACK;
+
+               if (src_cnt > xor_src_cnt) {
+                       /* drop completed sources */
+                       src_cnt -= xor_src_cnt;
+                       src_off += xor_src_cnt;
+
+                       /* use the intermediate result as a source */
+                       dma_src[--src_off] = dma_dest;
+                       src_cnt++;
+               } else
+                       break;
+       }
 
        return tx;
 }
 
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-       unsigned int src_cnt, size_t len, enum async_tx_flags flags,
-       struct dma_async_tx_descriptor *depend_tx,
-       dma_async_tx_callback cb_fn, void *cb_param)
+           int src_cnt, size_t len, enum async_tx_flags flags,
+           dma_async_tx_callback cb_fn, void *cb_param)
 {
-       void *_dest;
        int i;
-
-       pr_debug("%s: len: %zu\n", __func__, len);
+       int xor_src_cnt;
+       int src_off = 0;
+       void *dest_buf;
+       void **srcs = (void **) src_list;
 
        /* reuse the 'src_list' array to convert to buffer pointers */
        for (i = 0; i < src_cnt; i++)
-               src_list[i] = (struct page *)
-                       (page_address(src_list[i]) + offset);
+               srcs[i] = page_address(src_list[i]) + offset;
 
        /* set destination address */
-       _dest = page_address(dest) + offset;
+       dest_buf = page_address(dest) + offset;
 
        if (flags & ASYNC_TX_XOR_ZERO_DST)
-               memset(_dest, 0, len);
+               memset(dest_buf, 0, len);
 
-       xor_blocks(src_cnt, len, _dest,
-               (void **) src_list);
+       while (src_cnt > 0) {
+               /* process up to 'MAX_XOR_BLOCKS' sources */
+               xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+               xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
 
-       async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+               /* drop completed sources */
+               src_cnt -= xor_src_cnt;
+               src_off += xor_src_cnt;
+       }
+
+       async_tx_sync_epilog(cb_fn, cb_param);
 }
 
 /**
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
        struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
                                                      &dest, 1, src_list,
                                                      src_cnt, len);
-       struct dma_device *device = chan ? chan->device : NULL;
-       struct dma_async_tx_descriptor *tx = NULL;
-       dma_async_tx_callback _cb_fn;
-       void *_cb_param;
-       unsigned long local_flags;
-       int xor_src_cnt;
-       int i = 0, src_off = 0;
-
        BUG_ON(src_cnt <= 1);
 
-       while (src_cnt) {
-               local_flags = flags;
-               if (device) { /* run the xor asynchronously */
-                       xor_src_cnt = min(src_cnt, device->max_xor);
-                       /* if we are submitting additional xors
-                        * only set the callback on the last transaction
-                        */
-                       if (src_cnt > xor_src_cnt) {
-                               local_flags &= ~ASYNC_TX_ACK;
-                               _cb_fn = NULL;
-                               _cb_param = NULL;
-                       } else {
-                               _cb_fn = cb_fn;
-                               _cb_param = cb_param;
-                       }
-
-                       tx = do_async_xor(device, chan, dest,
-                                         &src_list[src_off], offset,
-                                         xor_src_cnt, len, local_flags,
-                                         depend_tx, _cb_fn, _cb_param);
-               } else { /* run the xor synchronously */
-                       /* in the sync case the dest is an implied source
-                        * (assumes the dest is at the src_off index)
-                        */
-                       if (flags & ASYNC_TX_XOR_DROP_DST) {
-                               src_cnt--;
-                               src_off++;
-                       }
-
-                       /* process up to 'MAX_XOR_BLOCKS' sources */
-                       xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
-
-                       /* if we are submitting additional xors
-                        * only set the callback on the last transaction
-                        */
-                       if (src_cnt > xor_src_cnt) {
-                               local_flags &= ~ASYNC_TX_ACK;
-                               _cb_fn = NULL;
-                               _cb_param = NULL;
-                       } else {
-                               _cb_fn = cb_fn;
-                               _cb_param = cb_param;
-                       }
-
-                       /* wait for any prerequisite operations */
-                       if (depend_tx) {
-                               /* if ack is already set then we cannot be sure
-                                * we are referring to the correct operation
-                                */
-                               BUG_ON(async_tx_test_ack(depend_tx));
-                               if (dma_wait_for_async_tx(depend_tx) ==
-                                       DMA_ERROR)
-                                       panic("%s: DMA_ERROR waiting for "
-                                               "depend_tx\n",
-                                               __func__);
-                       }
-
-                       do_sync_xor(dest, &src_list[src_off], offset,
-                               xor_src_cnt, len, local_flags, depend_tx,
-                               _cb_fn, _cb_param);
-               }
+       if (chan) {
+               /* run the xor asynchronously */
+               pr_debug("%s (async): len: %zu\n", __func__, len);
 
-               /* the previous tx is hidden from the client,
-                * so ack it
-                */
-               if (i && depend_tx)
-                       async_tx_ack(depend_tx);
+               return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
+                                   flags, depend_tx, cb_fn, cb_param);
+       } else {
+               /* run the xor synchronously */
+               pr_debug("%s (sync): len: %zu\n", __func__, len);
 
-               depend_tx = tx;
+               /* in the sync case the dest is an implied source
+                * (assumes the dest is the first source)
+                */
+               if (flags & ASYNC_TX_XOR_DROP_DST) {
+                       src_cnt--;
+                       src_list++;
+               }
 
-               if (src_cnt > xor_src_cnt) {
-                       /* drop completed sources */
-                       src_cnt -= xor_src_cnt;
-                       src_off += xor_src_cnt;
+               /* wait for any prerequisite operations */
+               async_tx_quiesce(&depend_tx);
 
-                       /* unconditionally preserve the destination */
-                       flags &= ~ASYNC_TX_XOR_ZERO_DST;
+               do_sync_xor(dest, src_list, offset, src_cnt, len,
+                           flags, cb_fn, cb_param);
 
-                       /* use the intermediate result a source, but remember
-                        * it's dropped, because it's implied, in the sync case
-                        */
-                       src_list[--src_off] = dest;
-                       src_cnt++;
-                       flags |= ASYNC_TX_XOR_DROP_DST;
-               } else
-                       src_cnt = 0;
-               i++;
+               return NULL;
        }
-
-       return tx;
 }
 EXPORT_SYMBOL_GPL(async_xor);
 
@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
                tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
                                                      len, result,
                                                      dma_prep_flags);
-               if (!tx) {
-                       if (depend_tx)
-                               dma_wait_for_async_tx(depend_tx);
+               if (unlikely(!tx)) {
+                       async_tx_quiesce(&depend_tx);
 
-                       while (!tx)
+                       while (!tx) {
+                               dma_async_issue_pending(chan);
                                tx = device->device_prep_dma_zero_sum(chan,
                                        dma_src, src_cnt, len, result,
                                        dma_prep_flags);
+                       }
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
                tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
                        depend_tx, NULL, NULL);
 
-               if (tx) {
-                       if (dma_wait_for_async_tx(tx) == DMA_ERROR)
-                               panic("%s: DMA_ERROR waiting for tx\n",
-                                       __func__);
-                       async_tx_ack(tx);
-               }
+               async_tx_quiesce(&tx);
 
                *result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-               tx = NULL;
-
-               async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+               async_tx_sync_epilog(cb_fn, cb_param);
        }
 
        return tx;
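The rewritten do_async_xor() above breaks a wide XOR into chunks of at most dma->max_xor sources and feeds the destination back in as a source for the next chunk, so every pass after the first consumes max_xor - 1 new sources. A standalone sketch of that index arithmetic (example values only):

#include <stdio.h>

int main(void)
{
	int src_cnt = 10, max_xor = 4;	/* assumed example values */
	int src_off = 0, pass = 0;

	while (src_cnt) {
		int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

		printf("pass %d: xor srcs [%d..%d] into dest\n",
		       ++pass, src_off, src_off + xor_src_cnt - 1);

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources... */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;
			/* ...then re-add dest in the last consumed slot */
			src_off--;
			src_cnt++;
		} else {
			break;
		}
	}
	/* prints passes [0..3], [3..6], [6..9]: 10 sources in 3 passes */
	return 0;
}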
index a22662b6a1a50fc6aa25f7a000d619abd7ba1bc7..39f6357e3b5d0a63e432cf0238cc84aba4b8d33f 100644 (file)
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/spinlock.h>
-#include <linux/smp_lock.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
index 15e597d030026b4b45ee5d6e61d306b379fa5dde..fa48dba5ba5e94ff5833f16c8e4e758bb1ffbd66 100644 (file)
@@ -915,7 +915,7 @@ static void tty_reset_termios(struct tty_struct *tty)
  *     do_tty_hangup           -       actual handler for hangup events
  *     @work: tty device
  *
-k *    This can be called by the "eventd" kernel thread.  That is process
+ *     This can be called by the "eventd" kernel thread.  That is process
  *     synchronous but doesn't hold any locks, so we need to make sure we
  *     have the appropriate locks for what we're doing.
  *
index bf5b92f86df7e07ca3813a4e127f21c7b92f9bff..ec249d2db633edb68d57ad22f8f27d42e2c8e5f4 100644 (file)
 #include <linux/device.h>
 #include <linux/dca.h>
 
-MODULE_LICENSE("GPL");
+#define DCA_VERSION "1.4"
 
-/* For now we're assuming a single, global, DCA provider for the system. */
+MODULE_VERSION(DCA_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
 
 static DEFINE_SPINLOCK(dca_lock);
 
-static struct dca_provider *global_dca = NULL;
+static LIST_HEAD(dca_providers);
+
+static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
+{
+       struct dca_provider *dca, *ret = NULL;
+
+       list_for_each_entry(dca, &dca_providers, node) {
+               if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
+                       ret = dca;
+                       break;
+               }
+       }
+
+       return ret;
+}
 
 /**
  * dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
  */
 int dca_add_requester(struct device *dev)
 {
-       int err, slot;
+       struct dca_provider *dca;
+       int err, slot = -ENODEV;
 
-       if (!global_dca)
-               return -ENODEV;
+       if (!dev)
+               return -EFAULT;
 
        spin_lock(&dca_lock);
-       slot = global_dca->ops->add_requester(global_dca, dev);
-       spin_unlock(&dca_lock);
-       if (slot < 0)
+
+       /* check if the requester has not been added already */
+       dca = dca_find_provider_by_dev(dev);
+       if (dca) {
+               spin_unlock(&dca_lock);
+               return -EEXIST;
+       }
+
+       list_for_each_entry(dca, &dca_providers, node) {
+               slot = dca->ops->add_requester(dca, dev);
+               if (slot >= 0)
+                       break;
+       }
+       if (slot < 0) {
+               spin_unlock(&dca_lock);
                return slot;
+       }
 
-       err = dca_sysfs_add_req(global_dca, dev, slot);
+       err = dca_sysfs_add_req(dca, dev, slot);
        if (err) {
-               spin_lock(&dca_lock);
-               global_dca->ops->remove_requester(global_dca, dev);
+               dca->ops->remove_requester(dca, dev);
                spin_unlock(&dca_lock);
                return err;
        }
 
+       spin_unlock(&dca_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(dca_add_requester);
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
  */
 int dca_remove_requester(struct device *dev)
 {
+       struct dca_provider *dca;
        int slot;
-       if (!global_dca)
-               return -ENODEV;
+
+       if (!dev)
+               return -EFAULT;
 
        spin_lock(&dca_lock);
-       slot = global_dca->ops->remove_requester(global_dca, dev);
-       spin_unlock(&dca_lock);
-       if (slot < 0)
+       dca = dca_find_provider_by_dev(dev);
+       if (!dca) {
+               spin_unlock(&dca_lock);
+               return -ENODEV;
+       }
+       slot = dca->ops->remove_requester(dca, dev);
+       if (slot < 0) {
+               spin_unlock(&dca_lock);
                return slot;
+       }
 
-       dca_sysfs_remove_req(global_dca, slot);
+       dca_sysfs_remove_req(dca, slot);
+
+       spin_unlock(&dca_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(dca_remove_requester);
 
 /**
- * dca_get_tag - return the dca tag for the given cpu
+ * dca_common_get_tag - return the dca tag (serves both new and old api)
+ * @dev - the device that wants dca service
  * @cpu - the cpuid as returned by get_cpu()
  */
-u8 dca_get_tag(int cpu)
+u8 dca_common_get_tag(struct device *dev, int cpu)
 {
-       if (!global_dca)
+       struct dca_provider *dca;
+       u8 tag;
+
+       spin_lock(&dca_lock);
+
+       dca = dca_find_provider_by_dev(dev);
+       if (!dca) {
+               spin_unlock(&dca_lock);
                return -ENODEV;
-       return global_dca->ops->get_tag(global_dca, cpu);
+       }
+       tag = dca->ops->get_tag(dca, dev, cpu);
+
+       spin_unlock(&dca_lock);
+       return tag;
+}
+
+/**
+ * dca3_get_tag - return the dca tag to the requester device
+ *                for the given cpu (new api)
+ * @dev - the device that wants dca service
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca3_get_tag(struct device *dev, int cpu)
+{
+       if (!dev)
+               return -EFAULT;
+
+       return dca_common_get_tag(dev, cpu);
+}
+EXPORT_SYMBOL_GPL(dca3_get_tag);
+
+/**
+ * dca_get_tag - return the dca tag for the given cpu (old api)
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca_get_tag(int cpu)
+{
+       struct device *dev = NULL;
+
+       return dca_common_get_tag(dev, cpu);
 }
 EXPORT_SYMBOL_GPL(dca_get_tag);
 
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 {
        int err;
 
-       if (global_dca)
-               return -EEXIST;
        err = dca_sysfs_add_provider(dca, dev);
        if (err)
                return err;
-       global_dca = dca;
+       list_add(&dca->node, &dca_providers);
        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_ADD, NULL);
        return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
  */
 void unregister_dca_provider(struct dca_provider *dca)
 {
-       if (!global_dca)
-               return;
        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);
-       global_dca = NULL;
+       list_del(&dca->node);
        dca_sysfs_remove_provider(dca);
 }
 EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
 
 static int __init dca_init(void)
 {
+       printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
        return dca_sysfs_init();
 }
 
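Taken together, the dca-core changes above replace the single global provider with a list that is searched per device. As a minimal sketch of the requester side (not part of this commit; example_setup_dca() is an invented name and error handling is abbreviated), a driver would now use the reworked API like this:

#include <linux/dca.h>
#include <linux/smp.h>

/* Hypothetical requester-side usage of the multi-provider API. */
static int example_setup_dca(struct device *dev)
{
	int err;
	u8 tag;

	err = dca_add_requester(dev);	/* -EEXIST if already added */
	if (err)
		return err;

	tag = dca3_get_tag(dev, get_cpu());	/* per-device tag (new API) */
	put_cpu();
	/* ... write `tag` to the device's DCA control register ... */

	return 0;
}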
index 9a70377bfb34e808e954afb266eb5218f8bb7982..7af4b403bd2d12a6f3219f8f8d6615f5de69def1 100644 (file)
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
 int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
 {
        struct device *cd;
+       static int req_count;
 
        cd = device_create_drvdata(dca_class, dca->cd,
                                   MKDEV(0, slot + 1), NULL,
-                                  "requester%d", slot);
+                                  "requester%d", req_count++);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        return 0;
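Note on the requester naming above: with several providers registered, two of them can both hand out slot 0, so naming the sysfs node after the slot would collide. The static req_count gives every requester a unique, monotonically increasing name (requester0, requester1, ...) regardless of which provider granted the slot.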
index 6239c3df30ac53fd877e4779c721068568556e1d..cd303901eb5b20c13b1636ec32614d3ac8800a60 100644 (file)
@@ -4,13 +4,14 @@
 
 menuconfig DMADEVICES
        bool "DMA Engine support"
-       depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
-       depends on !HIGHMEM64G
+       depends on !HIGHMEM64G && HAS_DMA
        help
          DMA engines can do asynchronous data transfers without
          involving the host CPU.  Currently, this framework can be
          used to offload memory copies in the network stack and
-         RAID operations in the MD driver.
+         RAID operations in the MD driver.  This menu only presents
+         DMA Device drivers supported by the configured arch; it may
+         be empty in some cases.
 
 if DMADEVICES
 
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
        help
          Enable support for the Intel(R) IOP Series RAID engines.
 
+config DW_DMAC
+       tristate "Synopsys DesignWare AHB DMA support"
+       depends on AVR32
+       select DMA_ENGINE
+       default y if CPU_AT32AP7000
+       help
+         Support the Synopsys DesignWare AHB DMA controller.  This
+         can be integrated in chips such as the Atmel AT32ap7000.
+
 config FSL_DMA
        bool "Freescale MPC85xx/MPC83xx DMA support"
        depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
          MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
          The MPC8349, MPC8360 is also supported.
 
+config MV_XOR
+       bool "Marvell XOR engine support"
+       depends on PLAT_ORION
+       select ASYNC_CORE
+       select DMA_ENGINE
+       ---help---
+         Enable support for the Marvell XOR engine.
+
 config DMA_ENGINE
        bool
 
@@ -55,10 +73,19 @@ comment "DMA Clients"
 config NET_DMA
        bool "Network: TCP receive copy offload"
        depends on DMA_ENGINE && NET
+       default (INTEL_IOATDMA || FSL_DMA)
        help
          This enables the use of DMA engines in the network stack to
          offload receive copy-to-user operations, freeing CPU cycles.
-         Since this is the main user of the DMA engine, it should be enabled;
-         say Y here.
+
+         Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
+         say N.
+
+config DMATEST
+       tristate "DMA Test client"
+       depends on DMA_ENGINE
+       help
+         Simple DMA test client. Say N unless you're debugging a
+         DMA Device driver.
 
 endif
index c8036d94590277d24f994a82c61fccbe51a89f07..14f59527d4f6bc5ab02e750e7580f209506cd7c1 100644 (file)
@@ -1,6 +1,9 @@
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
+obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
 ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
index 97b329e767983172a7069687d4d8fedb1e28cee7..dc003a3a787d545207fed8be6e81bb4aacd970c1 100644 (file)
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
        enum dma_state_client ack;
 
        /* Find a channel */
-       list_for_each_entry(device, &dma_device_list, global_node)
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               /* Does the client require a specific DMA controller? */
+               if (client->slave && client->slave->dma_dev
+                               && client->slave->dma_dev != device->dev)
+                       continue;
+
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!dma_chan_satisfies_mask(chan, client->cap_mask))
                                continue;
 
-                       desc = chan->device->device_alloc_chan_resources(chan);
+                       desc = chan->device->device_alloc_chan_resources(
+                                       chan, client);
                        if (desc >= 0) {
                                ack = client->event_callback(client,
                                                chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
                                /* we are done once this client rejects
                                 * an available resource
                                 */
-                               if (ack == DMA_ACK)
+                               if (ack == DMA_ACK) {
                                        dma_chan_get(chan);
-                               else if (ack == DMA_NAK)
+                                       chan->client_count++;
+                               } else if (ack == DMA_NAK)
                                        return;
                        }
                }
+       }
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
                /* client was holding resources for this channel so
                 * free it
                 */
-               if (ack == DMA_ACK)
+               if (ack == DMA_ACK) {
                        dma_chan_put(chan);
+                       chan->client_count--;
+               }
        }
 
        mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
  */
 void dma_async_client_register(struct dma_client *client)
 {
+       /* validate client data */
+       BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
+               !client->slave);
+
        mutex_lock(&dma_list_mutex);
        list_add_tail(&client->global_node, &dma_client_list);
        mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
                        ack = client->event_callback(client, chan,
                                DMA_RESOURCE_REMOVED);
 
-                       if (ack == DMA_ACK)
+                       if (ack == DMA_ACK) {
                                dma_chan_put(chan);
+                               chan->client_count--;
+                       }
                }
 
        list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
+       BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+               !device->device_prep_slave_sg);
+       BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+               !device->device_terminate_all);
 
        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
 
                chan->chan_id = chancnt++;
                chan->dev.class = &dma_devclass;
-               chan->dev.parent = NULL;
+               chan->dev.parent = device->dev;
                snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
                         device->dev_id, chan->chan_id);
 
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
                kref_get(&device->refcount);
                kref_get(&device->refcount);
                kref_init(&chan->refcount);
+               chan->client_count = 0;
                chan->slow_ref = 0;
                INIT_RCU_HEAD(&chan->rcu);
        }
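The dmaengine changes above add per-channel client counting and a slave hook on struct dma_client. A minimal sketch of what a DMA_SLAVE client now looks like, assuming invented example_* names and a struct dma_slave whose dma_dev and register fields are filled in elsewhere:

#include <linux/dmaengine.h>

/* A DMA_SLAVE client must now set .slave, or the BUG_ON() in
 * dma_async_client_register() above fires. */
static enum dma_state_client
example_event(struct dma_client *client, struct dma_chan *chan,
		enum dma_state state)
{
	/* accept the first matching channel, ignore removals */
	return state == DMA_RESOURCE_AVAILABLE ? DMA_ACK : DMA_DUP;
}

static struct dma_slave example_slave;	/* .dma_dev etc. set elsewhere */

static struct dma_client example_client = {
	.event_callback	= example_event,
	.slave		= &example_slave,
};

static int __init example_init(void)
{
	dma_cap_set(DMA_SLAVE, example_client.cap_mask);
	dma_async_client_register(&example_client);	/* validates .slave */
	dma_async_client_chan_request(&example_client);
	return 0;
}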
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
new file mode 100644 (file)
index 0000000..a08d197
--- /dev/null
@@ -0,0 +1,444 @@
+/*
+ * DMA Engine test module
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/wait.h>
+
+static unsigned int test_buf_size = 16384;
+module_param(test_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static char test_channel[BUS_ID_SIZE];
+module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
+
+static char test_device[BUS_ID_SIZE];
+module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
+
+static unsigned int threads_per_chan = 1;
+module_param(threads_per_chan, uint, S_IRUGO);
+MODULE_PARM_DESC(threads_per_chan,
+               "Number of threads to start per channel (default: 1)");
+
+static unsigned int max_channels;
+module_param(max_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(max_channels,
+               "Maximum number of channels to use (default: all)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set, all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC            0x80
+#define PATTERN_DST            0x00
+#define PATTERN_COPY           0x40
+#define PATTERN_OVERWRITE      0x20
+#define PATTERN_COUNT_MASK     0x1f
+
+struct dmatest_thread {
+       struct list_head        node;
+       struct task_struct      *task;
+       struct dma_chan         *chan;
+       u8                      *srcbuf;
+       u8                      *dstbuf;
+};
+
+struct dmatest_chan {
+       struct list_head        node;
+       struct dma_chan         *chan;
+       struct list_head        threads;
+};
+
+/*
+ * These are protected by dma_list_mutex since they're only used by
+ * the DMA client event callback
+ */
+static LIST_HEAD(dmatest_channels);
+static unsigned int nr_channels;
+
+static bool dmatest_match_channel(struct dma_chan *chan)
+{
+       if (test_channel[0] == '\0')
+               return true;
+       return strcmp(chan->dev.bus_id, test_channel) == 0;
+}
+
+static bool dmatest_match_device(struct dma_device *device)
+{
+       if (test_device[0] == '\0')
+               return true;
+       return strcmp(device->dev->bus_id, test_device) == 0;
+}
+
+static unsigned long dmatest_random(void)
+{
+       unsigned long buf;
+
+       get_random_bytes(&buf, sizeof(buf));
+       return buf;
+}
+
+static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
+{
+       unsigned int i;
+
+       for (i = 0; i < start; i++)
+               buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+       for ( ; i < start + len; i++)
+               buf[i] = PATTERN_SRC | PATTERN_COPY
+                       | (~i & PATTERN_COUNT_MASK);
+       for ( ; i < test_buf_size; i++)
+               buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+}
+
+static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
+{
+       unsigned int i;
+
+       for (i = 0; i < start; i++)
+               buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+       for ( ; i < start + len; i++)
+               buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+                       | (~i & PATTERN_COUNT_MASK);
+       for ( ; i < test_buf_size; i++)
+               buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+}
+
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+               unsigned int counter, bool is_srcbuf)
+{
+       u8              diff = actual ^ pattern;
+       u8              expected = pattern | (~counter & PATTERN_COUNT_MASK);
+       const char      *thread_name = current->comm;
+
+       if (is_srcbuf)
+               pr_warning("%s: srcbuf[0x%x] overwritten!"
+                               " Expected %02x, got %02x\n",
+                               thread_name, index, expected, actual);
+       else if ((pattern & PATTERN_COPY)
+                       && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+               pr_warning("%s: dstbuf[0x%x] not copied!"
+                               " Expected %02x, got %02x\n",
+                               thread_name, index, expected, actual);
+       else if (diff & PATTERN_SRC)
+               pr_warning("%s: dstbuf[0x%x] was copied!"
+                               " Expected %02x, got %02x\n",
+                               thread_name, index, expected, actual);
+       else
+               pr_warning("%s: dstbuf[0x%x] mismatch!"
+                               " Expected %02x, got %02x\n",
+                               thread_name, index, expected, actual);
+}
+
+static unsigned int dmatest_verify(u8 *buf, unsigned int start,
+               unsigned int end, unsigned int counter, u8 pattern,
+               bool is_srcbuf)
+{
+       unsigned int i;
+       unsigned int error_count = 0;
+       u8 actual;
+
+       for (i = start; i < end; i++) {
+               actual = buf[i];
+               if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
+                       if (error_count < 32)
+                               dmatest_mismatch(actual, pattern, i, counter,
+                                               is_srcbuf);
+                       error_count++;
+               }
+               counter++;
+       }
+
+       if (error_count > 32)
+               pr_warning("%s: %u errors suppressed\n",
+                       current->comm, error_count - 32);
+
+       return error_count;
+}
+
+/*
+ * This function repeatedly tests DMA transfers of various lengths and
+ * offsets until it is told to exit by kthread_stop(). There may be
+ * multiple threads running this function in parallel for a single
+ * channel, and there may be multiple channels being tested in
+ * parallel.
+ *
+ * Before each test, the source and destination buffers are initialized
+ * with a known pattern. This pattern is different depending on
+ * whether it's in an area which is supposed to be copied or
+ * overwritten, and different in the source and destination buffers.
+ * So if the DMA engine doesn't copy exactly what we tell it to copy,
+ * we'll notice.
+ */
+static int dmatest_func(void *data)
+{
+       struct dmatest_thread   *thread = data;
+       struct dma_chan         *chan;
+       const char              *thread_name;
+       unsigned int            src_off, dst_off, len;
+       unsigned int            error_count;
+       unsigned int            failed_tests = 0;
+       unsigned int            total_tests = 0;
+       dma_cookie_t            cookie;
+       enum dma_status         status;
+       int                     ret;
+
+       thread_name = current->comm;
+
+       ret = -ENOMEM;
+       thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
+       if (!thread->srcbuf)
+               goto err_srcbuf;
+       thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
+       if (!thread->dstbuf)
+               goto err_dstbuf;
+
+       smp_rmb();
+       chan = thread->chan;
+       dma_chan_get(chan);
+
+       while (!kthread_should_stop()) {
+               total_tests++;
+
+               len = dmatest_random() % test_buf_size + 1;
+               src_off = dmatest_random() % (test_buf_size - len + 1);
+               dst_off = dmatest_random() % (test_buf_size - len + 1);
+
+               dmatest_init_srcbuf(thread->srcbuf, src_off, len);
+               dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
+
+               cookie = dma_async_memcpy_buf_to_buf(chan,
+                               thread->dstbuf + dst_off,
+                               thread->srcbuf + src_off,
+                               len);
+               if (dma_submit_error(cookie)) {
+                       pr_warning("%s: #%u: submit error %d with src_off=0x%x "
+                                       "dst_off=0x%x len=0x%x\n",
+                                       thread_name, total_tests - 1, cookie,
+                                       src_off, dst_off, len);
+                       msleep(100);
+                       failed_tests++;
+                       continue;
+               }
+               dma_async_memcpy_issue_pending(chan);
+
+               do {
+                       msleep(1);
+                       status = dma_async_memcpy_complete(
+                                       chan, cookie, NULL, NULL);
+               } while (status == DMA_IN_PROGRESS);
+
+               if (status == DMA_ERROR) {
+                       pr_warning("%s: #%u: error during copy\n",
+                                       thread_name, total_tests - 1);
+                       failed_tests++;
+                       continue;
+               }
+
+               error_count = 0;
+
+               pr_debug("%s: verifying source buffer...\n", thread_name);
+               error_count += dmatest_verify(thread->srcbuf, 0, src_off,
+                               0, PATTERN_SRC, true);
+               error_count += dmatest_verify(thread->srcbuf, src_off,
+                               src_off + len, src_off,
+                               PATTERN_SRC | PATTERN_COPY, true);
+               error_count += dmatest_verify(thread->srcbuf, src_off + len,
+                               test_buf_size, src_off + len,
+                               PATTERN_SRC, true);
+
+               pr_debug("%s: verifying dest buffer...\n",
+                               thread->task->comm);
+               error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
+                               0, PATTERN_DST, false);
+               error_count += dmatest_verify(thread->dstbuf, dst_off,
+                               dst_off + len, src_off,
+                               PATTERN_SRC | PATTERN_COPY, false);
+               error_count += dmatest_verify(thread->dstbuf, dst_off + len,
+                               test_buf_size, dst_off + len,
+                               PATTERN_DST, false);
+
+               if (error_count) {
+                       pr_warning("%s: #%u: %u errors with "
+                               "src_off=0x%x dst_off=0x%x len=0x%x\n",
+                               thread_name, total_tests - 1, error_count,
+                               src_off, dst_off, len);
+                       failed_tests++;
+               } else {
+                       pr_debug("%s: #%u: No errors with "
+                               "src_off=0x%x dst_off=0x%x len=0x%x\n",
+                               thread_name, total_tests - 1,
+                               src_off, dst_off, len);
+               }
+       }
+
+       ret = 0;
+       dma_chan_put(chan);
+       kfree(thread->dstbuf);
+err_dstbuf:
+       kfree(thread->srcbuf);
+err_srcbuf:
+       pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+                       thread_name, total_tests, failed_tests, ret);
+       return ret;
+}
+
+static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
+{
+       struct dmatest_thread   *thread;
+       struct dmatest_thread   *_thread;
+       int                     ret;
+
+       list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
+               ret = kthread_stop(thread->task);
+               pr_debug("dmatest: thread %s exited with status %d\n",
+                               thread->task->comm, ret);
+               list_del(&thread->node);
+               kfree(thread);
+       }
+       kfree(dtc);
+}
+
+static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
+{
+       struct dmatest_chan     *dtc;
+       struct dmatest_thread   *thread;
+       unsigned int            i;
+
+       dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
+       if (!dtc) {
+               pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
+               return DMA_NAK;
+       }
+
+       dtc->chan = chan;
+       INIT_LIST_HEAD(&dtc->threads);
+
+       for (i = 0; i < threads_per_chan; i++) {
+               thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
+               if (!thread) {
+                       pr_warning("dmatest: No memory for %s-test%u\n",
+                                       chan->dev.bus_id, i);
+                       break;
+               }
+               thread->chan = dtc->chan;
+               smp_wmb();
+               thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
+                               chan->dev.bus_id, i);
+               if (IS_ERR(thread->task)) {
+                       pr_warning("dmatest: Failed to run thread %s-test%u\n",
+                                       chan->dev.bus_id, i);
+                       kfree(thread);
+                       break;
+               }
+
+               /* srcbuf and dstbuf are allocated by the thread itself */
+
+               list_add_tail(&thread->node, &dtc->threads);
+       }
+
+       pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
+
+       list_add_tail(&dtc->node, &dmatest_channels);
+       nr_channels++;
+
+       return DMA_ACK;
+}
+
+static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
+{
+       struct dmatest_chan     *dtc, *_dtc;
+
+       list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+               if (dtc->chan == chan) {
+                       list_del(&dtc->node);
+                       dmatest_cleanup_channel(dtc);
+                       pr_debug("dmatest: lost channel %s\n",
+                                       chan->dev.bus_id);
+                       return DMA_ACK;
+               }
+       }
+
+       return DMA_DUP;
+}
+
+/*
+ * Start testing threads as new channels are assigned to us, and kill
+ * them when the channels go away.
+ *
+ * When we unregister the client, all channels are removed so this
+ * will also take care of cleaning things up when the module is
+ * unloaded.
+ */
+static enum dma_state_client
+dmatest_event(struct dma_client *client, struct dma_chan *chan,
+               enum dma_state state)
+{
+       enum dma_state_client   ack = DMA_NAK;
+
+       switch (state) {
+       case DMA_RESOURCE_AVAILABLE:
+               if (!dmatest_match_channel(chan)
+                               || !dmatest_match_device(chan->device))
+                       ack = DMA_DUP;
+               else if (max_channels && nr_channels >= max_channels)
+                       ack = DMA_NAK;
+               else
+                       ack = dmatest_add_channel(chan);
+               break;
+
+       case DMA_RESOURCE_REMOVED:
+               ack = dmatest_remove_channel(chan);
+               break;
+
+       default:
+               pr_info("dmatest: Unhandled event %u (%s)\n",
+                               state, chan->dev.bus_id);
+               break;
+       }
+
+       return ack;
+}
+
+static struct dma_client dmatest_client = {
+       .event_callback = dmatest_event,
+};
+
+static int __init dmatest_init(void)
+{
+       dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
+       dma_async_client_register(&dmatest_client);
+       dma_async_client_chan_request(&dmatest_client);
+
+       return 0;
+}
+module_init(dmatest_init);
+
+static void __exit dmatest_exit(void)
+{
+       dma_async_client_unregister(&dmatest_client);
+}
+module_exit(dmatest_exit);
+
+MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644 (file)
index 0000000..94df917
--- /dev/null
@@ -0,0 +1,1122 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
+ * AVR32 systems).
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dw_dmac_regs.h"
+
+/*
+ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
+ * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
+ * of which use ARM any more).  See the "Databook" from Synopsys for
+ * information beyond what licensees probably provide.
+ *
+ * The driver has currently been tested only with the Atmel AT32AP7000,
+ * which does not support descriptor writeback.
+ */
+
+/* NOTE:  DMS+SMS is system-specific. We should get this information
+ * from the platform code somehow.
+ */
+#define DWC_DEFAULT_CTLLO      (DWC_CTLL_DST_MSIZE(0)          \
+                               | DWC_CTLL_SRC_MSIZE(0)         \
+                               | DWC_CTLL_DMS(0)               \
+                               | DWC_CTLL_SMS(1)               \
+                               | DWC_CTLL_LLP_D_EN             \
+                               | DWC_CTLL_LLP_S_EN)
+
+/*
+ * This is configuration-dependent and usually a funny size like 4095.
+ * Let's round it down to the nearest power of two.
+ *
+ * Note that this is a transfer count, i.e. if we transfer 32-bit
+ * words, we can do 8192 bytes per descriptor.
+ *
+ * This parameter is also system-specific.
+ */
+#define DWC_MAX_COUNT  2048U
+
+/*
+ * Number of descriptors to allocate for each channel. This should be
+ * made configurable somehow; preferably, the clients (at least the
+ * ones using slave transfers) should be able to give us a hint.
+ */
+#define NR_DESCS_PER_CHANNEL   64
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Because we're not relying on writeback from the controller (it may not
+ * even be configured into the core!) we don't need to use dma_pool.  These
+ * descriptors -- and associated data -- are cacheable.  We do need to make
+ * sure their dcache entries are written back before handing them off to
+ * the controller, though.
+ */
+
+static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
+{
+       return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+}
+
+static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
+{
+       return list_entry(dwc->queue.next, struct dw_desc, desc_node);
+}
+
+static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+{
+       struct dw_desc *desc, *_desc;
+       struct dw_desc *ret = NULL;
+       unsigned int i = 0;
+
+       spin_lock_bh(&dwc->lock);
+       list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+               if (async_tx_test_ack(&desc->txd)) {
+                       list_del(&desc->desc_node);
+                       ret = desc;
+                       break;
+               }
+               dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+               i++;
+       }
+       spin_unlock_bh(&dwc->lock);
+
+       dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+
+       return ret;
+}
+
+static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+       struct dw_desc  *child;
+
+       list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+               dma_sync_single_for_cpu(dwc->chan.dev.parent,
+                               child->txd.phys, sizeof(child->lli),
+                               DMA_TO_DEVICE);
+       dma_sync_single_for_cpu(dwc->chan.dev.parent,
+                       desc->txd.phys, sizeof(desc->lli),
+                       DMA_TO_DEVICE);
+}
+
+/*
+ * Move a descriptor, including any children, to the free list.
+ * `desc' must not be on any lists.
+ */
+static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+       if (desc) {
+               struct dw_desc *child;
+
+               dwc_sync_desc_for_cpu(dwc, desc);
+
+               spin_lock_bh(&dwc->lock);
+               list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+                       dev_vdbg(&dwc->chan.dev,
+                                       "moving child desc %p to freelist\n",
+                                       child);
+               list_splice_init(&desc->txd.tx_list, &dwc->free_list);
+               dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+               list_add(&desc->desc_node, &dwc->free_list);
+               spin_unlock_bh(&dwc->lock);
+       }
+}
+
+/* Called with dwc->lock held and bh disabled */
+static dma_cookie_t
+dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+       dma_cookie_t cookie = dwc->chan.cookie;
+
+       if (++cookie < 0)
+               cookie = 1;
+
+       dwc->chan.cookie = cookie;
+       desc->txd.cookie = cookie;
+
+       return cookie;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* Called with dwc->lock held and bh disabled */
+static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+{
+       struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
+
+       /* ASSERT:  channel is idle */
+       if (dma_readl(dw, CH_EN) & dwc->mask) {
+               dev_err(&dwc->chan.dev,
+                       "BUG: Attempted to start non-idle channel\n");
+               dev_err(&dwc->chan.dev,
+                       "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+                       channel_readl(dwc, SAR),
+                       channel_readl(dwc, DAR),
+                       channel_readl(dwc, LLP),
+                       channel_readl(dwc, CTL_HI),
+                       channel_readl(dwc, CTL_LO));
+
+               /* The tasklet will hopefully advance the queue... */
+               return;
+       }
+
+       channel_writel(dwc, LLP, first->txd.phys);
+       channel_writel(dwc, CTL_LO,
+                       DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+       channel_writel(dwc, CTL_HI, 0);
+       channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+       dma_async_tx_callback           callback;
+       void                            *param;
+       struct dma_async_tx_descriptor  *txd = &desc->txd;
+
+       dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+
+       dwc->completed = txd->cookie;
+       callback = txd->callback;
+       param = txd->callback_param;
+
+       dwc_sync_desc_for_cpu(dwc, desc);
+       list_splice_init(&txd->tx_list, &dwc->free_list);
+       list_move(&desc->desc_node, &dwc->free_list);
+
+       /*
+        * We use dma_unmap_page() regardless of how the buffers were
+        * mapped before they were submitted...
+        */
+       if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
+               dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
+                               DMA_FROM_DEVICE);
+       if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
+               dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
+                               DMA_TO_DEVICE);
+
+       /*
+        * The API requires that no submissions are done from a
+        * callback, so we don't need to drop the lock here
+        */
+       if (callback)
+               callback(param);
+}
+
+static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+       struct dw_desc *desc, *_desc;
+       LIST_HEAD(list);
+
+       if (dma_readl(dw, CH_EN) & dwc->mask) {
+               dev_err(&dwc->chan.dev,
+                       "BUG: XFER bit set, but channel not idle!\n");
+
+               /* Try to continue after resetting the channel... */
+               channel_clear_bit(dw, CH_EN, dwc->mask);
+               while (dma_readl(dw, CH_EN) & dwc->mask)
+                       cpu_relax();
+       }
+
+       /*
+        * Submit queued descriptors ASAP, i.e. before we go through
+        * the completed ones.
+        */
+       if (!list_empty(&dwc->queue))
+               dwc_dostart(dwc, dwc_first_queued(dwc));
+       list_splice_init(&dwc->active_list, &list);
+       list_splice_init(&dwc->queue, &dwc->active_list);
+
+       list_for_each_entry_safe(desc, _desc, &list, desc_node)
+               dwc_descriptor_complete(dwc, desc);
+}
+
+static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+       dma_addr_t llp;
+       struct dw_desc *desc, *_desc;
+       struct dw_desc *child;
+       u32 status_xfer;
+
+       /*
+        * Clear block interrupt flag before scanning so that we don't
+        * miss any, and read LLP before RAW_XFER to ensure it is
+        * valid if we decide to scan the list.
+        */
+       dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+       llp = channel_readl(dwc, LLP);
+       status_xfer = dma_readl(dw, RAW.XFER);
+
+       if (status_xfer & dwc->mask) {
+               /* Everything we've submitted is done */
+               dma_writel(dw, CLEAR.XFER, dwc->mask);
+               dwc_complete_all(dw, dwc);
+               return;
+       }
+
+       dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+
+       list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+               if (desc->lli.llp == llp)
+                       /* This one is currently in progress */
+                       return;
+
+               list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+                       if (child->lli.llp == llp)
+                               /* Currently in progress */
+                               return;
+
+               /*
+                * No descriptors so far seem to be in progress, i.e.
+                * this one must be done.
+                */
+               dwc_descriptor_complete(dwc, desc);
+       }
+
+       dev_err(&dwc->chan.dev,
+               "BUG: All descriptors done, but channel not idle!\n");
+
+       /* Try to continue after resetting the channel... */
+       channel_clear_bit(dw, CH_EN, dwc->mask);
+       while (dma_readl(dw, CH_EN) & dwc->mask)
+               cpu_relax();
+
+       if (!list_empty(&dwc->queue)) {
+               dwc_dostart(dwc, dwc_first_queued(dwc));
+               list_splice_init(&dwc->queue, &dwc->active_list);
+       }
+}
+
+static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+{
+       dev_printk(KERN_CRIT, &dwc->chan.dev,
+                       "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+                       lli->sar, lli->dar, lli->llp,
+                       lli->ctlhi, lli->ctllo);
+}
+
+static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+       struct dw_desc *bad_desc;
+       struct dw_desc *child;
+
+       dwc_scan_descriptors(dw, dwc);
+
+       /*
+        * The descriptor currently at the head of the active list is
+        * borked. Since we don't have any way to report errors, we'll
+        * just have to scream loudly and try to carry on.
+        */
+       bad_desc = dwc_first_active(dwc);
+       list_del_init(&bad_desc->desc_node);
+       list_splice_init(&dwc->queue, dwc->active_list.prev);
+
+       /* Clear the error flag and try to restart the controller */
+       dma_writel(dw, CLEAR.ERROR, dwc->mask);
+       if (!list_empty(&dwc->active_list))
+               dwc_dostart(dwc, dwc_first_active(dwc));
+
+       /*
+        * KERN_CRIT may seem harsh, but since this only happens
+        * when someone submits a bad physical address in a
+        * descriptor, we should consider ourselves lucky that the
+        * controller flagged an error instead of scribbling over
+        * random memory locations.
+        */
+       dev_printk(KERN_CRIT, &dwc->chan.dev,
+                       "Bad descriptor submitted for DMA!\n");
+       dev_printk(KERN_CRIT, &dwc->chan.dev,
+                       "  cookie: %d\n", bad_desc->txd.cookie);
+       dwc_dump_lli(dwc, &bad_desc->lli);
+       list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+               dwc_dump_lli(dwc, &child->lli);
+
+       /* Pretend the descriptor completed successfully */
+       dwc_descriptor_complete(dwc, bad_desc);
+}
+
+static void dw_dma_tasklet(unsigned long data)
+{
+       struct dw_dma *dw = (struct dw_dma *)data;
+       struct dw_dma_chan *dwc;
+       u32 status_block;
+       u32 status_xfer;
+       u32 status_err;
+       int i;
+
+       status_block = dma_readl(dw, RAW.BLOCK);
+       status_xfer = dma_readl(dw, RAW.XFER);
+       status_err = dma_readl(dw, RAW.ERROR);
+
+       dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
+                       status_block, status_err);
+
+       for (i = 0; i < dw->dma.chancnt; i++) {
+               dwc = &dw->chan[i];
+               spin_lock(&dwc->lock);
+               if (status_err & (1 << i))
+                       dwc_handle_error(dw, dwc);
+               else if ((status_block | status_xfer) & (1 << i))
+                       dwc_scan_descriptors(dw, dwc);
+               spin_unlock(&dwc->lock);
+       }
+
+       /*
+        * Re-enable interrupts. Block Complete interrupts are only
+        * enabled if the INT_EN bit in the descriptor is set. This
+        * will trigger a scan before the whole list is done.
+        */
+       channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+       channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+       channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+}
+
+static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+{
+       struct dw_dma *dw = dev_id;
+       u32 status;
+
+       dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
+                       dma_readl(dw, STATUS_INT));
+
+       /*
+        * Just disable the interrupts. We'll turn them back on in the
+        * softirq handler.
+        */
+       channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+       status = dma_readl(dw, STATUS_INT);
+       if (status) {
+               dev_err(dw->dma.dev,
+                       "BUG: Unexpected interrupts pending: 0x%x\n",
+                       status);
+
+               /* Try to recover */
+               channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+               channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
+               channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+               channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+               channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+       }
+
+       tasklet_schedule(&dw->tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct dw_desc          *desc = txd_to_dw_desc(tx);
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
+       dma_cookie_t            cookie;
+
+       spin_lock_bh(&dwc->lock);
+       cookie = dwc_assign_cookie(dwc, desc);
+
+       /*
+        * REVISIT: We should attempt to chain as many descriptors as
+        * possible, perhaps even appending to those already submitted
+        * for DMA. But this is hard to do in a race-free manner.
+        */
+       if (list_empty(&dwc->active_list)) {
+               dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+                               desc->txd.cookie);
+               dwc_dostart(dwc, desc);
+               list_add_tail(&desc->desc_node, &dwc->active_list);
+       } else {
+               dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+                               desc->txd.cookie);
+
+               list_add_tail(&desc->desc_node, &dwc->queue);
+       }
+
+       spin_unlock_bh(&dwc->lock);
+
+       return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+               size_t len, unsigned long flags)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_desc          *desc;
+       struct dw_desc          *first;
+       struct dw_desc          *prev;
+       size_t                  xfer_count;
+       size_t                  offset;
+       unsigned int            src_width;
+       unsigned int            dst_width;
+       u32                     ctllo;
+
+       dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+                       dest, src, len, flags);
+
+       if (unlikely(!len)) {
+               dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+               return NULL;
+       }
+
+       /*
+        * We can be a lot more clever here, but this should take care
+        * of the most common optimization.
+        */
+       if (!((src | dest | len) & 3))
+               src_width = dst_width = 2;
+       else if (!((src | dest | len) & 1))
+               src_width = dst_width = 1;
+       else
+               src_width = dst_width = 0;
+
+       ctllo = DWC_DEFAULT_CTLLO
+                       | DWC_CTLL_DST_WIDTH(dst_width)
+                       | DWC_CTLL_SRC_WIDTH(src_width)
+                       | DWC_CTLL_DST_INC
+                       | DWC_CTLL_SRC_INC
+                       | DWC_CTLL_FC_M2M;
+       prev = first = NULL;
+
+       for (offset = 0; offset < len; offset += xfer_count << src_width) {
+               xfer_count = min_t(size_t, (len - offset) >> src_width,
+                               DWC_MAX_COUNT);
+
+               desc = dwc_desc_get(dwc);
+               if (!desc)
+                       goto err_desc_get;
+
+               desc->lli.sar = src + offset;
+               desc->lli.dar = dest + offset;
+               desc->lli.ctllo = ctllo;
+               desc->lli.ctlhi = xfer_count;
+
+               if (!first) {
+                       first = desc;
+               } else {
+                       prev->lli.llp = desc->txd.phys;
+                       dma_sync_single_for_device(chan->dev.parent,
+                                       prev->txd.phys, sizeof(prev->lli),
+                                       DMA_TO_DEVICE);
+                       list_add_tail(&desc->desc_node,
+                                       &first->txd.tx_list);
+               }
+               prev = desc;
+       }
+
+
+       if (flags & DMA_PREP_INTERRUPT)
+               /* Trigger interrupt after last block */
+               prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+       prev->lli.llp = 0;
+       dma_sync_single_for_device(chan->dev.parent,
+                       prev->txd.phys, sizeof(prev->lli),
+                       DMA_TO_DEVICE);
+
+       first->txd.flags = flags;
+       first->len = len;
+
+       return &first->txd;
+
+err_desc_get:
+       dwc_desc_put(dwc, first);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned long flags)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma_slave     *dws = dwc->dws;
+       struct dw_desc          *prev;
+       struct dw_desc          *first;
+       u32                     ctllo;
+       dma_addr_t              reg;
+       unsigned int            reg_width;
+       unsigned int            mem_width;
+       unsigned int            i;
+       struct scatterlist      *sg;
+       size_t                  total_len = 0;
+
+       dev_vdbg(&chan->dev, "prep_dma_slave\n");
+
+       if (unlikely(!dws || !sg_len))
+               return NULL;
+
+       reg_width = dws->slave.reg_width;
+       prev = first = NULL;
+
+       sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+
+       switch (direction) {
+       case DMA_TO_DEVICE:
+               ctllo = (DWC_DEFAULT_CTLLO
+                               | DWC_CTLL_DST_WIDTH(reg_width)
+                               | DWC_CTLL_DST_FIX
+                               | DWC_CTLL_SRC_INC
+                               | DWC_CTLL_FC_M2P);
+               reg = dws->slave.tx_reg;
+               for_each_sg(sgl, sg, sg_len, i) {
+                       struct dw_desc  *desc;
+                       u32             len;
+                       u32             mem;
+
+                       desc = dwc_desc_get(dwc);
+                       if (!desc) {
+                               dev_err(&chan->dev,
+                                       "not enough descriptors available\n");
+                               goto err_desc_get;
+                       }
+
+                       mem = sg_phys(sg);
+                       len = sg_dma_len(sg);
+                       mem_width = 2;
+                       if (unlikely(mem & 3 || len & 3))
+                               mem_width = 0;
+
+                       desc->lli.sar = mem;
+                       desc->lli.dar = reg;
+                       desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+                       desc->lli.ctlhi = len >> mem_width;
+
+                       if (!first) {
+                               first = desc;
+                       } else {
+                               prev->lli.llp = desc->txd.phys;
+                               dma_sync_single_for_device(chan->dev.parent,
+                                               prev->txd.phys,
+                                               sizeof(prev->lli),
+                                               DMA_TO_DEVICE);
+                               list_add_tail(&desc->desc_node,
+                                               &first->txd.tx_list);
+                       }
+                       prev = desc;
+                       total_len += len;
+               }
+               break;
+       case DMA_FROM_DEVICE:
+               ctllo = (DWC_DEFAULT_CTLLO
+                               | DWC_CTLL_SRC_WIDTH(reg_width)
+                               | DWC_CTLL_DST_INC
+                               | DWC_CTLL_SRC_FIX
+                               | DWC_CTLL_FC_P2M);
+
+               reg = dws->slave.rx_reg;
+               for_each_sg(sgl, sg, sg_len, i) {
+                       struct dw_desc  *desc;
+                       u32             len;
+                       u32             mem;
+
+                       desc = dwc_desc_get(dwc);
+                       if (!desc) {
+                               dev_err(&chan->dev,
+                                       "not enough descriptors available\n");
+                               goto err_desc_get;
+                       }
+
+                       mem = sg_phys(sg);
+                       len = sg_dma_len(sg);
+                       mem_width = 2;
+                       if (unlikely(mem & 3 || len & 3))
+                               mem_width = 0;
+
+                       desc->lli.sar = reg;
+                       desc->lli.dar = mem;
+                       desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+                       desc->lli.ctlhi = len >> reg_width;
+
+                       if (!first) {
+                               first = desc;
+                       } else {
+                               prev->lli.llp = desc->txd.phys;
+                               dma_sync_single_for_device(chan->dev.parent,
+                                               prev->txd.phys,
+                                               sizeof(prev->lli),
+                                               DMA_TO_DEVICE);
+                               list_add_tail(&desc->desc_node,
+                                               &first->txd.tx_list);
+                       }
+                       prev = desc;
+                       total_len += len;
+               }
+               break;
+       default:
+               return NULL;
+       }
+
+       if (flags & DMA_PREP_INTERRUPT)
+               /* Trigger interrupt after last block */
+               prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+       prev->lli.llp = 0;
+       dma_sync_single_for_device(chan->dev.parent,
+                       prev->txd.phys, sizeof(prev->lli),
+                       DMA_TO_DEVICE);
+
+       first->len = total_len;
+
+       return &first->txd;
+
+err_desc_get:
+       dwc_desc_put(dwc, first);
+       return NULL;
+}
+
+static void dwc_terminate_all(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(chan->device);
+       struct dw_desc          *desc, *_desc;
+       LIST_HEAD(list);
+
+       /*
+        * This is only called when something went wrong elsewhere, so
+        * we don't really care about the data. Just disable the
+        * channel. We still have to poll the channel enable bit due
+        * to AHB/HSB limitations.
+        */
+       spin_lock_bh(&dwc->lock);
+
+       channel_clear_bit(dw, CH_EN, dwc->mask);
+
+       while (dma_readl(dw, CH_EN) & dwc->mask)
+               cpu_relax();
+
+       /* active_list entries will end up before queued entries */
+       list_splice_init(&dwc->queue, &list);
+       list_splice_init(&dwc->active_list, &list);
+
+       spin_unlock_bh(&dwc->lock);
+
+       /* Flush all pending and queued descriptors */
+       list_for_each_entry_safe(desc, _desc, &list, desc_node)
+               dwc_descriptor_complete(dwc, desc);
+}
+
+static enum dma_status
+dwc_is_tx_complete(struct dma_chan *chan,
+               dma_cookie_t cookie,
+               dma_cookie_t *done, dma_cookie_t *used)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       dma_cookie_t            last_used;
+       dma_cookie_t            last_complete;
+       int                     ret;
+
+       last_complete = dwc->completed;
+       last_used = chan->cookie;
+
+       ret = dma_async_is_complete(cookie, last_complete, last_used);
+       if (ret != DMA_SUCCESS) {
+               dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+
+               last_complete = dwc->completed;
+               last_used = chan->cookie;
+
+               ret = dma_async_is_complete(cookie, last_complete, last_used);
+       }
+
+       if (done)
+               *done = last_complete;
+       if (used)
+               *used = last_used;
+
+       return ret;
+}
+
+static void dwc_issue_pending(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+
+       spin_lock_bh(&dwc->lock);
+       if (!list_empty(&dwc->queue))
+               dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+       spin_unlock_bh(&dwc->lock);
+}
+
+static int dwc_alloc_chan_resources(struct dma_chan *chan,
+               struct dma_client *client)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(chan->device);
+       struct dw_desc          *desc;
+       struct dma_slave        *slave;
+       struct dw_dma_slave     *dws;
+       int                     i;
+       u32                     cfghi;
+       u32                     cfglo;
+
+       dev_vdbg(&chan->dev, "alloc_chan_resources\n");
+
+       /* Channels doing slave DMA can only handle one client. */
+       if (dwc->dws || client->slave) {
+               if (chan->client_count)
+                       return -EBUSY;
+       }
+
+       /* ASSERT:  channel is idle */
+       if (dma_readl(dw, CH_EN) & dwc->mask) {
+               dev_dbg(&chan->dev, "DMA channel not idle?\n");
+               return -EIO;
+       }
+
+       dwc->completed = chan->cookie = 1;
+
+       cfghi = DWC_CFGH_FIFO_MODE;
+       cfglo = 0;
+
+       slave = client->slave;
+       if (slave) {
+               /*
+                * We need controller-specific data to set up slave
+                * transfers.
+                */
+               BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
+
+               dws = container_of(slave, struct dw_dma_slave, slave);
+
+               dwc->dws = dws;
+               cfghi = dws->cfg_hi;
+               cfglo = dws->cfg_lo;
+       } else {
+               dwc->dws = NULL;
+       }
+
+       channel_writel(dwc, CFG_LO, cfglo);
+       channel_writel(dwc, CFG_HI, cfghi);
+
+       /*
+        * NOTE: some controllers may have additional features that we
+        * need to initialize here, like "scatter-gather" (which
+        * doesn't mean what you think it means), and status writeback.
+        */
+
+       spin_lock_bh(&dwc->lock);
+       i = dwc->descs_allocated;
+       while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+               spin_unlock_bh(&dwc->lock);
+
+               desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
+               if (!desc) {
+                       dev_info(&chan->dev,
+                               "only allocated %d descriptors\n", i);
+                       spin_lock_bh(&dwc->lock);
+                       break;
+               }
+
+               dma_async_tx_descriptor_init(&desc->txd, chan);
+               desc->txd.tx_submit = dwc_tx_submit;
+               desc->txd.flags = DMA_CTRL_ACK;
+               INIT_LIST_HEAD(&desc->txd.tx_list);
+               desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+                               sizeof(desc->lli), DMA_TO_DEVICE);
+               dwc_desc_put(dwc, desc);
+
+               spin_lock_bh(&dwc->lock);
+               i = ++dwc->descs_allocated;
+       }
+
+       /* Enable interrupts */
+       channel_set_bit(dw, MASK.XFER, dwc->mask);
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+       channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+       spin_unlock_bh(&dwc->lock);
+
+       dev_dbg(&chan->dev,
+               "alloc_chan_resources allocated %d descriptors\n", i);
+
+       return i;
+}
+
+static void dwc_free_chan_resources(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(chan->device);
+       struct dw_desc          *desc, *_desc;
+       LIST_HEAD(list);
+
+       dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+                       dwc->descs_allocated);
+
+       /* ASSERT:  channel is idle */
+       BUG_ON(!list_empty(&dwc->active_list));
+       BUG_ON(!list_empty(&dwc->queue));
+       BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
+
+       spin_lock_bh(&dwc->lock);
+       list_splice_init(&dwc->free_list, &list);
+       dwc->descs_allocated = 0;
+       dwc->dws = NULL;
+
+       /* Disable interrupts */
+       channel_clear_bit(dw, MASK.XFER, dwc->mask);
+       channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
+       channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+
+       spin_unlock_bh(&dwc->lock);
+
+       list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+               dev_vdbg(&chan->dev, "  freeing descriptor %p\n", desc);
+               dma_unmap_single(chan->dev.parent, desc->txd.phys,
+                               sizeof(desc->lli), DMA_TO_DEVICE);
+               kfree(desc);
+       }
+
+       dev_vdbg(&chan->dev, "free_chan_resources done\n");
+}
+
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+       dma_writel(dw, CFG, 0);
+
+       channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
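+       /* wait for the controller to acknowledge the global disable */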
+       while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+               cpu_relax();
+}
+
+static int __init dw_probe(struct platform_device *pdev)
+{
+       struct dw_dma_platform_data *pdata;
+       struct resource         *io;
+       struct dw_dma           *dw;
+       size_t                  size;
+       int                     irq;
+       int                     err;
+       int                     i;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+               return -EINVAL;
+
+       io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!io)
+               return -EINVAL;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       size = sizeof(struct dw_dma);
+       size += pdata->nr_channels * sizeof(struct dw_dma_chan);
+       dw = kzalloc(size, GFP_KERNEL);
+       if (!dw)
+               return -ENOMEM;
+
+       if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
+               err = -EBUSY;
+               goto err_kfree;
+       }
+
+       dw->regs = ioremap(io->start, DW_REGLEN);
+       if (!dw->regs) {
+               err = -ENOMEM;
+               goto err_release_r;
+       }
+
+       dw->clk = clk_get(&pdev->dev, "hclk");
+       if (IS_ERR(dw->clk)) {
+               err = PTR_ERR(dw->clk);
+               goto err_clk;
+       }
+       clk_enable(dw->clk);
+
+       /* force dma off, just in case */
+       dw_dma_off(dw);
+
+       err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
+       if (err)
+               goto err_irq;
+
+       platform_set_drvdata(pdev, dw);
+
+       tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+
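+       /* one bit per channel, matching the CH_EN and MASK.* register layout */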
+       dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
+
+       INIT_LIST_HEAD(&dw->dma.channels);
+       for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+               struct dw_dma_chan      *dwc = &dw->chan[i];
+
+               dwc->chan.device = &dw->dma;
+               dwc->chan.cookie = dwc->completed = 1;
+               dwc->chan.chan_id = i;
+               list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+
+               dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
+               spin_lock_init(&dwc->lock);
+               dwc->mask = 1 << i;
+
+               INIT_LIST_HEAD(&dwc->active_list);
+               INIT_LIST_HEAD(&dwc->queue);
+               INIT_LIST_HEAD(&dwc->free_list);
+
+               channel_clear_bit(dw, CH_EN, dwc->mask);
+       }
+
+       /* Clear/disable all interrupts on all channels. */
+       dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+       dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
+       dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
+       dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
+       dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
+
+       channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+       dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+       dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+       dw->dma.dev = &pdev->dev;
+       dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
+       dw->dma.device_free_chan_resources = dwc_free_chan_resources;
+
+       dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
+
+       dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
+       dw->dma.device_terminate_all = dwc_terminate_all;
+
+       dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+       dw->dma.device_issue_pending = dwc_issue_pending;
+
+       dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
+       printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
+                       pdev->dev.bus_id, dw->dma.chancnt);
+
+       dma_async_device_register(&dw->dma);
+
+       return 0;
+
+err_irq:
+       clk_disable(dw->clk);
+       clk_put(dw->clk);
+err_clk:
+       iounmap(dw->regs);
+       dw->regs = NULL;
+err_release_r:
+       release_mem_region(io->start, DW_REGLEN);
+err_kfree:
+       kfree(dw);
+       return err;
+}
+
+static int __exit dw_remove(struct platform_device *pdev)
+{
+       struct dw_dma           *dw = platform_get_drvdata(pdev);
+       struct dw_dma_chan      *dwc, *_dwc;
+       struct resource         *io;
+
+       dw_dma_off(dw);
+       dma_async_device_unregister(&dw->dma);
+
+       free_irq(platform_get_irq(pdev, 0), dw);
+       tasklet_kill(&dw->tasklet);
+
+       list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+                       chan.device_node) {
+               list_del(&dwc->chan.device_node);
+               channel_clear_bit(dw, CH_EN, dwc->mask);
+       }
+
+       clk_disable(dw->clk);
+       clk_put(dw->clk);
+
+       iounmap(dw->regs);
+       dw->regs = NULL;
+
+       io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(io->start, DW_REGLEN);
+
+       kfree(dw);
+
+       return 0;
+}
+
+static void dw_shutdown(struct platform_device *pdev)
+{
+       struct dw_dma   *dw = platform_get_drvdata(pdev);
+
+       dw_dma_off(dw);
+       clk_disable(dw->clk);
+}
+
+static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+{
+       struct dw_dma   *dw = platform_get_drvdata(pdev);
+
+       dw_dma_off(dw);
+       clk_disable(dw->clk);
+       return 0;
+}
+
+static int dw_resume_early(struct platform_device *pdev)
+{
+       struct dw_dma   *dw = platform_get_drvdata(pdev);
+
+       clk_enable(dw->clk);
+       dma_writel(dw, CFG, DW_CFG_DMA_EN);
+       return 0;
+}
+
+static struct platform_driver dw_driver = {
+       .remove         = __exit_p(dw_remove),
+       .shutdown       = dw_shutdown,
+       .suspend_late   = dw_suspend_late,
+       .resume_early   = dw_resume_early,
+       .driver = {
+               .name   = "dw_dmac",
+       },
+};
+
+static int __init dw_init(void)
+{
+       return platform_driver_probe(&dw_driver, dw_probe);
+}
+module_init(dw_init);
+
+static void __exit dw_exit(void)
+{
+       platform_driver_unregister(&dw_driver);
+}
+module_exit(dw_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644 (file)
index 0000000..00fdd18
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * Driver for the Synopsys DesignWare AHB DMA Controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dw_dmac.h>
+
+#define DW_DMA_MAX_NR_CHANNELS 8
+
+/*
+ * Redefine this macro to handle differences between 32- and 64-bit
+ * addressing, big vs. little endian, etc.
+ */
+#define DW_REG(name)           u32 name; u32 __pad_##name
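+
+/*
+ * DW_REG(SAR), for example, expands to "u32 SAR; u32 __pad_SAR", so
+ * each 32-bit register occupies a full 64-bit slot in the register map.
+ */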
+
+/* Hardware register definitions. */
+struct dw_dma_chan_regs {
+       DW_REG(SAR);            /* Source Address Register */
+       DW_REG(DAR);            /* Destination Address Register */
+       DW_REG(LLP);            /* Linked List Pointer */
+       u32     CTL_LO;         /* Control Register Low */
+       u32     CTL_HI;         /* Control Register High */
+       DW_REG(SSTAT);
+       DW_REG(DSTAT);
+       DW_REG(SSTATAR);
+       DW_REG(DSTATAR);
+       u32     CFG_LO;         /* Configuration Register Low */
+       u32     CFG_HI;         /* Configuration Register High */
+       DW_REG(SGR);
+       DW_REG(DSR);
+};
+
+struct dw_dma_irq_regs {
+       DW_REG(XFER);
+       DW_REG(BLOCK);
+       DW_REG(SRC_TRAN);
+       DW_REG(DST_TRAN);
+       DW_REG(ERROR);
+};
+
+struct dw_dma_regs {
+       /* per-channel registers */
+       struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
+
+       /* irq handling */
+       struct dw_dma_irq_regs  RAW;            /* r */
+       struct dw_dma_irq_regs  STATUS;         /* r (raw & mask) */
+       struct dw_dma_irq_regs  MASK;           /* rw (set = irq enabled) */
+       struct dw_dma_irq_regs  CLEAR;          /* w (ack, affects "raw") */
+
+       DW_REG(STATUS_INT);                     /* r */
+
+       /* software handshaking */
+       DW_REG(REQ_SRC);
+       DW_REG(REQ_DST);
+       DW_REG(SGL_REQ_SRC);
+       DW_REG(SGL_REQ_DST);
+       DW_REG(LAST_SRC);
+       DW_REG(LAST_DST);
+
+       /* miscellaneous */
+       DW_REG(CFG);
+       DW_REG(CH_EN);
+       DW_REG(ID);
+       DW_REG(TEST);
+
+       /* optional encoded params, 0x3c8..0x3f7 */
+};
+
+/* Bitfields in CTL_LO */
+#define DWC_CTLL_INT_EN                (1 << 0)        /* irqs enabled? */
+#define DWC_CTLL_DST_WIDTH(n)  ((n)<<1)        /* bytes per element */
+#define DWC_CTLL_SRC_WIDTH(n)  ((n)<<4)
+#define DWC_CTLL_DST_INC       (0<<7)          /* DAR update/not */
+#define DWC_CTLL_DST_DEC       (1<<7)
+#define DWC_CTLL_DST_FIX       (2<<7)
+#define DWC_CTLL_SRC_INC       (0<<9)          /* SAR update/not */
+#define DWC_CTLL_SRC_DEC       (1<<9)
+#define DWC_CTLL_SRC_FIX       (2<<9)
+#define DWC_CTLL_DST_MSIZE(n)  ((n)<<11)       /* burst, #elements */
+#define DWC_CTLL_SRC_MSIZE(n)  ((n)<<14)
+#define DWC_CTLL_S_GATH_EN     (1 << 17)       /* src gather, !FIX */
+#define DWC_CTLL_D_SCAT_EN     (1 << 18)       /* dst scatter, !FIX */
+#define DWC_CTLL_FC_M2M                (0 << 20)       /* mem-to-mem */
+#define DWC_CTLL_FC_M2P                (1 << 20)       /* mem-to-periph */
+#define DWC_CTLL_FC_P2M                (2 << 20)       /* periph-to-mem */
+#define DWC_CTLL_FC_P2P                (3 << 20)       /* periph-to-periph */
+/* plus 4 transfer types for peripheral-as-flow-controller */
+#define DWC_CTLL_DMS(n)                ((n)<<23)       /* dst master select */
+#define DWC_CTLL_SMS(n)                ((n)<<25)       /* src master select */
+#define DWC_CTLL_LLP_D_EN      (1 << 27)       /* dest block chain */
+#define DWC_CTLL_LLP_S_EN      (1 << 28)       /* src block chain */
+
+/* Bitfields in CTL_HI */
+#define DWC_CTLH_DONE          0x00001000
+#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
+
+/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_SUSP       (1 << 8)        /* pause xfer */
+#define DWC_CFGL_FIFO_EMPTY    (1 << 9)        /* pause complete */
+#define DWC_CFGL_HS_DST                (1 << 10)       /* handshake w/dst */
+#define DWC_CFGL_HS_SRC                (1 << 11)       /* handshake w/src */
+#define DWC_CFGL_MAX_BURST(x)  ((x) << 20)
+#define DWC_CFGL_RELOAD_SAR    (1 << 30)
+#define DWC_CFGL_RELOAD_DAR    (1 << 31)
+
+/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGH_DS_UPD_EN     (1 << 5)
+#define DWC_CFGH_SS_UPD_EN     (1 << 6)
+
+/* Bitfields in SGR */
+#define DWC_SGR_SGI(x)         ((x) << 0)
+#define DWC_SGR_SGC(x)         ((x) << 20)
+
+/* Bitfields in DSR */
+#define DWC_DSR_DSI(x)         ((x) << 0)
+#define DWC_DSR_DSC(x)         ((x) << 20)
+
+/* Bitfields in CFG */
+#define DW_CFG_DMA_EN          (1 << 0)
+
+#define DW_REGLEN              0x400
+
+struct dw_dma_chan {
+       struct dma_chan         chan;
+       void __iomem            *ch_regs;
+       u8                      mask;
+
+       spinlock_t              lock;
+
+       /* these other elements are all protected by lock */
+       dma_cookie_t            completed;
+       struct list_head        active_list;
+       struct list_head        queue;
+       struct list_head        free_list;
+
+       struct dw_dma_slave     *dws;
+
+       unsigned int            descs_allocated;
+};
+
+static inline struct dw_dma_chan_regs __iomem *
+__dwc_regs(struct dw_dma_chan *dwc)
+{
+       return dwc->ch_regs;
+}
+
+#define channel_readl(dwc, name) \
+       __raw_readl(&(__dwc_regs(dwc)->name))
+#define channel_writel(dwc, name, val) \
+       __raw_writel((val), &(__dwc_regs(dwc)->name))
+
+static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct dw_dma_chan, chan);
+}
+
+struct dw_dma {
+       struct dma_device       dma;
+       void __iomem            *regs;
+       struct tasklet_struct   tasklet;
+       struct clk              *clk;
+
+       u8                      all_chan_mask;
+
+       struct dw_dma_chan      chan[0];
+};
+
+static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+{
+       return dw->regs;
+}
+
+#define dma_readl(dw, name) \
+       __raw_readl(&(__dw_regs(dw)->name))
+#define dma_writel(dw, name, val) \
+       __raw_writel((val), &(__dw_regs(dw)->name))
+
+#define channel_set_bit(dw, reg, mask) \
+       dma_writel(dw, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(dw, reg, mask) \
+       dma_writel(dw, reg, ((mask) << 8) | 0)
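+
+/*
+ * The upper byte of these registers is a write-enable for the lower
+ * byte: with mask 0x04, channel_set_bit() writes 0x0404 (set bit 2)
+ * and channel_clear_bit() writes 0x0400 (clear bit 2), leaving the
+ * other channels' bits untouched.
+ */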
+
+static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+{
+       return container_of(ddev, struct dw_dma, dma);
+}
+
+/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+struct dw_lli {
+       /* values that are not changed by hardware */
+       dma_addr_t      sar;
+       dma_addr_t      dar;
+       dma_addr_t      llp;            /* chain to next lli */
+       u32             ctllo;
+       /* values that may get written back: */
+       u32             ctlhi;
+       /* sstat and dstat can snapshot peripheral register state.
+        * silicon config may discard either or both...
+        */
+       u32             sstat;
+       u32             dstat;
+};
+
+struct dw_desc {
+       /* FIRST values the hardware uses */
+       struct dw_lli                   lli;
+
+       /* THEN values for driver housekeeping */
+       struct list_head                desc_node;
+       struct dma_async_tx_descriptor  txd;
+       size_t                          len;
+};
+
+static inline struct dw_desc *
+txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+{
+       return container_of(txd, struct dw_desc, txd);
+}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 054eabffc185a893ff32ab51c01dfd876ab84b43..c0059ca5834075e70f3fc59512d9ff69617bee29 100644 (file)
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
+                                       struct dma_client *client)
 {
        struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
        LIST_HEAD(tmp_list);
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
        if (!src) {
                dev_err(fsl_chan->dev,
                                "selftest: Cannot alloc memory for test!\n");
-               err = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
 
        dest = src + test_size;
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 
        chan = &fsl_chan->common;
 
-       if (fsl_dma_alloc_chan_resources(chan) < 1) {
+       if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
                dev_err(fsl_chan->dev,
                                "selftest: Cannot alloc resources for DMA\n");
                err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
        if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_err(fsl_chan->dev, "selftest: Time out!\n");
                err = -ENODEV;
-               goto out;
+               goto free_resources;
        }
 
        /* Test free and re-alloc channel resources */
        fsl_dma_free_chan_resources(chan);
 
-       if (fsl_dma_alloc_chan_resources(chan) < 1) {
+       if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
                dev_err(fsl_chan->dev,
                                "selftest: Cannot alloc resources for DMA\n");
                err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
        if (!new_fsl_chan) {
                dev_err(&dev->dev, "No free memory for allocating "
                                "dma channels!\n");
-               err = -ENOMEM;
-               goto err;
+               return -ENOMEM;
        }
 
        /* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
        if (err) {
                dev_err(&dev->dev, "Can't get %s property 'reg'\n",
                                dev->node->full_name);
-               goto err;
+               goto err_no_reg;
        }
 
        new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
                dev_err(&dev->dev, "There is no %d channel!\n",
                                new_fsl_chan->id);
                err = -EINVAL;
-               goto err;
+               goto err_no_chan;
        }
        fdev->chan[new_fsl_chan->id] = new_fsl_chan;
        tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
                if (err) {
                        dev_err(&dev->dev, "DMA channel %s request_irq error "
                                "with return %d\n", dev->node->full_name, err);
-                       goto err;
+                       goto err_no_irq;
                }
        }
 
        err = fsl_dma_self_test(new_fsl_chan);
        if (err)
-               goto err;
+               goto err_self_test;
 
        dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
                                match->compatible, new_fsl_chan->irq);
 
        return 0;
-err:
-       dma_halt(new_fsl_chan);
-       iounmap(new_fsl_chan->reg_base);
+
+err_self_test:
        free_irq(new_fsl_chan->irq, new_fsl_chan);
+err_no_irq:
        list_del(&new_fsl_chan->common.device_node);
+err_no_chan:
+       iounmap(new_fsl_chan->reg_base);
+err_no_reg:
        kfree(new_fsl_chan);
        return err;
 }
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
        fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
        if (!fdev) {
                dev_err(&dev->dev, "No enough memory for 'priv'\n");
-               err = -ENOMEM;
-               goto err;
+               return -ENOMEM;
        }
        fdev->dev = &dev->dev;
        INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
        if (err) {
                dev_err(&dev->dev, "Can't get %s property 'reg'\n",
                                dev->node->full_name);
-               goto err;
+               goto err_no_reg;
        }
 
        dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 
 err:
        iounmap(fdev->reg_base);
+err_no_reg:
        kfree(fdev);
        return err;
 }
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 16e0fd8facfb8ca30aaa4427815f165b0f60317f..9b16a3af9a0af6a9a377d0f10048216ac0f86510 100644 (file)
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {
 
        /* I/OAT v2 platforms */
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
+
+       /* I/OAT v3 platforms */
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
        { 0, }
 };
 
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
                if (device->dma && ioat_dca_enabled)
                        device->dca = ioat2_dca_init(pdev, iobase);
                break;
+       case IOAT_VER_3_0:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat3_dca_init(pdev, iobase);
+               break;
        default:
                err = -ENODEV;
                break;
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 9e922760b7ffc4b76688bf56652e72085827e7da..6cf622da0286481fccb32a73285c72c83fb6a57b 100644 (file)
 #include "ioatdma_registers.h"
 
 /*
- * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15
+ * Bit 7 of a tag map entry is the "valid" bit; if it is set, then bits 0:6
  * contain the bit number of the APIC ID to map into the DCA tag.  If the valid
  * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
  */
 #define DCA_TAG_MAP_VALID 0x80
 
+#define DCA3_TAG_MAP_BIT_TO_INV 0x80
+#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
+#define DCA3_TAG_MAP_LITERAL_VAL 0x1
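+
+/*
+ * Each v3 tag map entry encodes one tag bit: BIT_TO_SEL selects the
+ * named APIC ID bit, BIT_TO_INV selects its complement, and otherwise
+ * LITERAL_VAL supplies the bit value directly (see ioat3_dca_get_tag()).
+ */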
+
+#define DCA_TAG_MAP_MASK 0xDF
+
 /*
  * "Legacy" DCA systems do not implement the DCA register set in the
  * I/OAT device.  Software needs direct support for their tag mappings.
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
 };
 
 #define IOAT_DCA_MAX_REQ 6
+#define IOAT3_DCA_MAX_REQ 2
 
 struct ioat_dca_priv {
        void __iomem            *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
        return -ENODEV;
 }
 
-static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
+                          struct device *dev,
+                          int cpu)
 {
        struct ioat_dca_priv *ioatdca = dca_priv(dca);
        int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
        return tag;
 }
 
+static int ioat_dca_dev_managed(struct dca_provider *dca,
+                               struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+
+       pdev = to_pci_dev(dev);
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == pdev)
+                       return 1;
+       }
+       return 0;
+}
+
 static struct dca_ops ioat_dca_ops = {
        .add_requester          = ioat_dca_add_requester,
        .remove_requester       = ioat_dca_remove_requester,
        .get_tag                = ioat_dca_get_tag,
+       .dev_managed            = ioat_dca_dev_managed,
 };
 
 
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
        u8 *tag_map = NULL;
        int i;
        int err;
+       u8 version;
+       u8 max_requesters;
 
        if (!system_has_dca_enabled(pdev))
                return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
        if (tag_map == NULL)
                return NULL;
 
+       version = readb(iobase + IOAT_VER_OFFSET);
+       if (version == IOAT_VER_3_0)
+               max_requesters = IOAT3_DCA_MAX_REQ;
+       else
+               max_requesters = IOAT_DCA_MAX_REQ;
+
        dca = alloc_dca_provider(&ioat_dca_ops,
                        sizeof(*ioatdca) +
-                       (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ));
+                       (sizeof(struct ioat_dca_slot) * max_requesters));
        if (!dca)
                return NULL;
 
        ioatdca = dca_priv(dca);
-       ioatdca->max_requesters = IOAT_DCA_MAX_REQ;
-
+       ioatdca->max_requesters = max_requesters;
        ioatdca->dca_base = iobase + 0x54;
 
        /* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
        return -ENODEV;
 }
 
-static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu)
+static u8 ioat2_dca_get_tag(struct dca_provider *dca,
+                           struct device *dev,
+                           int cpu)
 {
        u8 tag;
 
-       tag = ioat_dca_get_tag(dca, cpu);
+       tag = ioat_dca_get_tag(dca, dev, cpu);
        tag = (~tag) & 0x1F;
        return tag;
 }
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
        .add_requester          = ioat2_dca_add_requester,
        .remove_requester       = ioat2_dca_remove_requester,
        .get_tag                = ioat2_dca_get_tag,
+       .dev_managed            = ioat_dca_dev_managed,
 };
 
 static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 
        return dca;
 }
+
+static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 id;
+       u16 global_req_table;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+       id = dcaid_from_pcidev(pdev);
+
+       if (ioatdca->requester_count == ioatdca->max_requesters)
+               return -ENODEV;
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == NULL) {
+                       /* found an empty slot */
+                       ioatdca->requester_count++;
+                       ioatdca->req_slots[i].pdev = pdev;
+                       ioatdca->req_slots[i].rid = id;
+                       global_req_table =
+                             readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
+                       writel(id | IOAT_DCA_GREQID_VALID,
+                              ioatdca->iobase + global_req_table + (i * 4));
+                       return i;
+               }
+       }
+       /* Error, ioatdca->requester_count is out of whack */
+       return -EFAULT;
+}
+
+static int ioat3_dca_remove_requester(struct dca_provider *dca,
+                                     struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 global_req_table;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == pdev) {
+                       global_req_table =
+                             readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
+                       writel(0, ioatdca->iobase + global_req_table + (i * 4));
+                       ioatdca->req_slots[i].pdev = NULL;
+                       ioatdca->req_slots[i].rid = 0;
+                       ioatdca->requester_count--;
+                       return i;
+               }
+       }
+       return -ENODEV;
+}
+
+static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+                           struct device *dev,
+                           int cpu)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       int i, apic_id, bit, value;
+       u8 entry;
+       u8 tag;
+
+       tag = 0;
+       apic_id = cpu_physical_id(cpu);
+
+       for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
+               entry = ioatdca->tag_map[i];
+               if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
+                       bit = entry &
+                               ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
+                       value = (apic_id & (1 << bit)) ? 1 : 0;
+               } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
+                       bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
+                       value = (apic_id & (1 << bit)) ? 0 : 1;
+               } else {
+                       value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
+               }
+               tag |= (value << i);
+       }
+
+       return tag;
+}
+
+static struct dca_ops ioat3_dca_ops = {
+       .add_requester          = ioat3_dca_add_requester,
+       .remove_requester       = ioat3_dca_remove_requester,
+       .get_tag                = ioat3_dca_get_tag,
+       .dev_managed            = ioat_dca_dev_managed,
+};
+
+static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
+{
+       int slots = 0;
+       u32 req;
+       u16 global_req_table;
+
+       global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
+       if (global_req_table == 0)
+               return 0;
+
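+       /* walk the global requester table; LASTID marks the final entry */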
+       do {
+               req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+               slots++;
+       } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+       return slots;
+}
+
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+       struct dca_provider *dca;
+       struct ioat_dca_priv *ioatdca;
+       int slots;
+       int i;
+       int err;
+       u16 dca_offset;
+       u16 csi_fsb_control;
+       u16 pcie_control;
+       u8 bit;
+
+       union {
+               u64 full;
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+       } tag_map;
+
+       if (!system_has_dca_enabled(pdev))
+               return NULL;
+
+       dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+       if (dca_offset == 0)
+               return NULL;
+
+       slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+       if (slots == 0)
+               return NULL;
+
+       dca = alloc_dca_provider(&ioat3_dca_ops,
+                                sizeof(*ioatdca)
+                                     + (sizeof(struct ioat_dca_slot) * slots));
+       if (!dca)
+               return NULL;
+
+       ioatdca = dca_priv(dca);
+       ioatdca->iobase = iobase;
+       ioatdca->dca_base = iobase + dca_offset;
+       ioatdca->max_requesters = slots;
+
+       /* some BIOSes might not know to turn these on */
+       csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
+       if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
+               csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
+               writew(csi_fsb_control,
+                      ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
+       }
+       pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
+       if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
+               pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
+               writew(pcie_control,
+                      ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
+       }
+
+       /* TODO version, compatibility and configuration checks */
+
+       /* copy out the APIC to DCA tag map */
+       tag_map.low =
+               readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
+       tag_map.high =
+               readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
+       for (i = 0; i < 8; i++) {
+               bit = tag_map.full >> (8 * i);
+               ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
+       }
+
+       err = register_dca_provider(dca, &pdev->dev);
+       if (err) {
+               free_dca_provider(dca);
+               return NULL;
+       }
+
+       return dca;
+}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 318e8a22d81423a4da8cfb759be3c9854148252e..a52156e568867e827c30f0008de9477df493b730 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 #include "ioatdma.h"
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
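+/* chan_num(): channel register blocks are spaced 0x80 apart; used in logs */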
 static int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");
 
+#define RESET_DELAY  msecs_to_jiffies(100)
+#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
+static void ioat_dma_chan_reset_part2(struct work_struct *work);
+static void ioat_dma_chan_watchdog(struct work_struct *work);
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
 /* internal functions */
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
        int i;
        struct ioat_dma_chan *ioat_chan;
 
+       /*
+        * IOAT ver.3 workarounds
+        */
+       if (device->version == IOAT_VER_3_0) {
+               u32 chan_err_mask;
+               u16 dev_id;
+               u32 dmauncerrsts;
+
+               /*
+                * Write CHANERRMSK_INT with 3E07h to mask out the errors
+                * that can cause stability issues for IOAT ver.3
+                */
+               chan_err_mask = 0x3E07;
+               pci_write_config_dword(device->pdev,
+                       IOAT_PCI_CHANERRMASK_INT_OFFSET,
+                       chan_err_mask);
+
+               /*
+                * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+                * (workaround for spurious config parity error after restart)
+                */
+               pci_read_config_word(device->pdev,
+                       IOAT_PCI_DEVICE_ID_OFFSET,
+                       &dev_id);
+               if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+                       dmauncerrsts = 0x10;
+                       pci_write_config_dword(device->pdev,
+                               IOAT_PCI_DMAUNCERRSTS_OFFSET,
+                               dmauncerrsts);
+               }
+       }
+
        device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
                ioat_chan->desccount = 0;
+               INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
                if (ioat_chan->device->version != IOAT_VER_1_2) {
                        writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
                                        | IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
-       if (ioat_chan->pending != 0) {
+       if (ioat_chan->pending > 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
-       if (ioat_chan->pending != 0) {
+       if (ioat_chan->pending > 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
 }
 
+
+/**
+ * ioat_dma_chan_reset_part2 - reinit the channel after a reset
+ * @work: delayed-work handle embedded in the channel being restarted
+ */
+static void ioat_dma_chan_reset_part2(struct work_struct *work)
+{
+       struct ioat_dma_chan *ioat_chan =
+               container_of(work, struct ioat_dma_chan, work.work);
+       struct ioat_desc_sw *desc;
+
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+       spin_lock_bh(&ioat_chan->desc_lock);
+
+       ioat_chan->completion_virt->low = 0;
+       ioat_chan->completion_virt->high = 0;
+       ioat_chan->pending = 0;
+
+       /*
+        * count the descriptors waiting, and be sure to do it
+        * right for both the CB1 line and the CB2 ring
+        */
+       ioat_chan->dmacount = 0;
+       if (ioat_chan->used_desc.prev) {
+               desc = to_ioat_desc(ioat_chan->used_desc.prev);
+               do {
+                       ioat_chan->dmacount++;
+                       desc = to_ioat_desc(desc->node.next);
+               } while (&desc->node != ioat_chan->used_desc.next);
+       }
+
+       /*
+        * write the new starting descriptor address
+        * this puts channel engine into ARMED state
+        */
+       desc = to_ioat_desc(ioat_chan->used_desc.prev);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+               break;
+       case IOAT_VER_2_0:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+               /* tell the engine to go with what's left to be done */
+               writew(ioat_chan->dmacount,
+                      ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+
+               break;
+       }
+       dev_err(&ioat_chan->device->pdev->dev,
+               "chan%d reset - %d descs waiting, %d total desc\n",
+               chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+
+       spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+/**
+ * ioat_dma_reset_channel - restart a channel
+ * @ioat_chan: IOAT DMA channel handle
+ */
+static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+{
+       u32 chansts, chanerr;
+
+       if (!ioat_chan->used_desc.prev)
+               return;
+
+       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       chansts = (ioat_chan->completion_virt->low
+                                       & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
+       if (chanerr) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+                       chan_num(ioat_chan), chansts, chanerr);
+               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       }
+
+       /*
+        * whack it upside the head with a reset
+        * and wait for things to settle out.
+        * force the pending count to a really big negative
+        * to make sure no one forces an issue_pending
+        * while we're waiting.
+        */
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       ioat_chan->pending = INT_MIN;
+       writeb(IOAT_CHANCMD_RESET,
+              ioat_chan->reg_base
+              + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       /* schedule the 2nd half instead of sleeping a long time */
+       schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
+}
+
+/**
+ * ioat_dma_chan_watchdog - watch for stuck channels
+ * @work: delayed-work handle embedded in the ioatdma device
+ */
+static void ioat_dma_chan_watchdog(struct work_struct *work)
+{
+       struct ioatdma_device *device =
+               container_of(work, struct ioatdma_device, work.work);
+       struct ioat_dma_chan *ioat_chan;
+       int i;
+
+       union {
+               u64 full;
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+       } completion_hw;
+       unsigned long compl_desc_addr_hw;
+
+       for (i = 0; i < device->common.chancnt; i++) {
+               ioat_chan = ioat_lookup_chan_by_index(device, i);
+
+               if (ioat_chan->device->version == IOAT_VER_1_2
+                       /* have we started processing anything yet */
+                   && ioat_chan->last_completion
+                       /* have we completed any since last watchdog cycle? */
+                   && (ioat_chan->last_completion ==
+                               ioat_chan->watchdog_completion)
+                       /* has TCP stuck on one cookie since last watchdog? */
+                   && (ioat_chan->watchdog_tcp_cookie ==
+                               ioat_chan->watchdog_last_tcp_cookie)
+                   && (ioat_chan->watchdog_tcp_cookie !=
+                               ioat_chan->completed_cookie)
+                       /* is there something in the chain to be processed? */
+                       /* CB1 chain always has at least the last one processed */
+                   && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
+                   && ioat_chan->pending == 0) {
+
+                       /*
+                        * check CHANSTS register for completed
+                        * descriptor address.
+                        * if it is different than completion writeback,
+                        * it is not zero
+                        * and it has changed since the last watchdog
+                        *     we can assume that channel
+                        *     is still working correctly
+                        *     and the problem is in completion writeback.
+                        *     update completion writeback
+                        *     with actual CHANSTS value
+                        * else
+                        *     try resetting the channel
+                        */
+
+                       completion_hw.low = readl(ioat_chan->reg_base +
+                               IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
+                       completion_hw.high = readl(ioat_chan->reg_base +
+                               IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
+#if (BITS_PER_LONG == 64)
+                       compl_desc_addr_hw =
+                               completion_hw.full
+                               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+#else
+                       compl_desc_addr_hw =
+                               completion_hw.low & IOAT_LOW_COMPLETION_MASK;
+#endif
+
+                       if ((compl_desc_addr_hw != 0)
+                          && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
+                          && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
+                               ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
+                               ioat_chan->completion_virt->low = completion_hw.low;
+                               ioat_chan->completion_virt->high = completion_hw.high;
+                       } else {
+                               ioat_dma_reset_channel(ioat_chan);
+                               ioat_chan->watchdog_completion = 0;
+                               ioat_chan->last_compl_desc_addr_hw = 0;
+                       }
+
+               /*
+                * for version 2.0 if there are descriptors yet to be processed
+                * and the last completed hasn't changed since the last watchdog
+                *      if they haven't hit the pending level
+                *          issue the pending to push them through
+                *      else
+                *          try resetting the channel
+                */
+               } else if (ioat_chan->device->version == IOAT_VER_2_0
+                   && ioat_chan->used_desc.prev
+                   && ioat_chan->last_completion
+                   && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
+
+                       if (ioat_chan->pending < ioat_pending_level)
+                               ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
+                       else {
+                               ioat_dma_reset_channel(ioat_chan);
+                               ioat_chan->watchdog_completion = 0;
+                       }
+               } else {
+                       ioat_chan->last_compl_desc_addr_hw = 0;
+                       ioat_chan->watchdog_completion
+                                       = ioat_chan->last_completion;
+               }
+
+               ioat_chan->watchdog_last_tcp_cookie =
+                       ioat_chan->watchdog_tcp_cookie;
+       }
+
+       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+}
+
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
                prev = new;
        } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
 
+       if (!new) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "tx submit failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return -ENOMEM;
+       }
+
        hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        if (new->async_tx.callback) {
                hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
                desc_count++;
        } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
 
-       hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       if (!new) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "tx submit failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return -ENOMEM;
+       }
+
+       hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        if (new->async_tx.callback) {
                hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
                if (first != new) {
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
                desc_sw->async_tx.tx_submit = ioat1_tx_submit;
                break;
        case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
                desc_sw->async_tx.tx_submit = ioat2_tx_submit;
                break;
        }
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
+                                        struct dma_client *client)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc;
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
                }
                break;
        case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
                list_for_each_entry_safe(desc, _desc,
                                         ioat_chan->free_desc.next, node) {
                        list_del(&desc->node);
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
        ioat_chan->pending = 0;
        ioat_chan->dmacount = 0;
+       ioat_chan->watchdog_completion = 0;
+       ioat_chan->last_compl_desc_addr_hw = 0;
+       ioat_chan->watchdog_tcp_cookie =
+               ioat_chan->watchdog_last_tcp_cookie = 0;
 }
 
 /**
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 
                /* set up the noop descriptor */
                noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
-               noop_desc->hw->size = 0;
+               /* set size to non-zero value (channel returns error when size is 0) */
+               noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
                noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
                noop_desc->hw->src_addr = 0;
                noop_desc->hw->dst_addr = 0;
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
                return ioat1_dma_get_next_descriptor(ioat_chan);
                break;
        case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
                return ioat2_dma_get_next_descriptor(ioat_chan);
                break;
        }
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
                new->src = dma_src;
                new->async_tx.flags = flags;
                return &new->async_tx;
-       } else
+       } else {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
                return NULL;
+       }
 }
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
                new->src = dma_src;
                new->async_tx.flags = flags;
                return &new->async_tx;
-       } else
+       } else {
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
                return NULL;
+       }
 }
 
 static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
               chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+       /*
+        * yes we are unmapping both _page and _single
+        * alloc'd regions with unmap_page. Is this
+        * *really* that bad?
+        */
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
+               pci_unmap_page(ioat_chan->device->pdev,
+                               pci_unmap_addr(desc, dst),
+                               pci_unmap_len(desc, len),
+                               PCI_DMA_FROMDEVICE);
+
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
+               pci_unmap_page(ioat_chan->device->pdev,
+                               pci_unmap_addr(desc, src),
+                               pci_unmap_len(desc, len),
+                               PCI_DMA_TODEVICE);
+}
+
 /**
  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
  * @chan: ioat channel to be cleaned up
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
        if (phys_complete == ioat_chan->last_completion) {
                spin_unlock_bh(&ioat_chan->cleanup_lock);
+               /*
+                * perhaps we're stuck so hard that the watchdog can't go off?
+                * try to catch it after 2 seconds
+                */
+               if (ioat_chan->device->version != IOAT_VER_3_0) {
+                       if (time_after(jiffies,
+                                      ioat_chan->last_completion_time + WATCHDOG_DELAY)) {
+                               ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
+                               ioat_chan->last_completion_time = jiffies;
+                       }
+               }
                return;
        }
+       ioat_chan->last_completion_time = jiffies;
 
        cookie = 0;
-       spin_lock_bh(&ioat_chan->desc_lock);
+       if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               return;
+       }
+
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
                         */
                        if (desc->async_tx.cookie) {
                                cookie = desc->async_tx.cookie;
-
-                               /*
-                                * yes we are unmapping both _page and _single
-                                * alloc'd regions with unmap_page. Is this
-                                * *really* that bad?
-                                */
-                               pci_unmap_page(ioat_chan->device->pdev,
-                                               pci_unmap_addr(desc, dst),
-                                               pci_unmap_len(desc, len),
-                                               PCI_DMA_FROMDEVICE);
-                               pci_unmap_page(ioat_chan->device->pdev,
-                                               pci_unmap_addr(desc, src),
-                                               pci_unmap_len(desc, len),
-                                               PCI_DMA_TODEVICE);
-
+                               ioat_dma_unmap(ioat_chan, desc);
                                if (desc->async_tx.callback) {
                                        desc->async_tx.callback(desc->async_tx.callback_param);
                                        desc->async_tx.callback = NULL;
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
                }
                break;
        case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
                /* has some other thread already cleaned up? */
                if (ioat_chan->used_desc.prev == NULL)
                        break;
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
                                if (desc->async_tx.cookie) {
                                        cookie = desc->async_tx.cookie;
                                        desc->async_tx.cookie = 0;
-
-                                       pci_unmap_page(ioat_chan->device->pdev,
-                                                     pci_unmap_addr(desc, dst),
-                                                     pci_unmap_len(desc, len),
-                                                     PCI_DMA_FROMDEVICE);
-                                       pci_unmap_page(ioat_chan->device->pdev,
-                                                     pci_unmap_addr(desc, src),
-                                                     pci_unmap_len(desc, len),
-                                                     PCI_DMA_TODEVICE);
-
+                                       ioat_dma_unmap(ioat_chan, desc);
                                        if (desc->async_tx.callback) {
                                                desc->async_tx.callback(desc->async_tx.callback_param);
                                                desc->async_tx.callback = NULL;
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 
        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;
+       ioat_chan->watchdog_tcp_cookie = cookie;
 
        if (done)
                *done = last_complete;
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
        spin_lock_bh(&ioat_chan->desc_lock);
 
        desc = ioat_dma_get_next_descriptor(ioat_chan);
+
+       if (!desc) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "Unable to start null desc - get next desc failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return;
+       }
+
        desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
                                | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
                                | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-       desc->hw->size = 0;
+       /* set size to non-zero value (channel returns error when size is 0) */
+       desc->hw->size = NULL_DESC_BUFFER_SIZE;
        desc->hw->src_addr = 0;
        desc->hw->dst_addr = 0;
        async_tx_ack(&desc->async_tx);
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
                break;
        case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
                writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->async_tx.phys) >> 32,
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
+       if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
                dev_err(&device->pdev->dev,
                        "selftest cannot allocate chan resource\n");
                err = -ENODEV;
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                                                ioat1_dma_memcpy_issue_pending;
                break;
        case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
                device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
                device->common.device_issue_pending =
                                                ioat2_dma_memcpy_issue_pending;
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
        if (err)
                goto err_self_test;
 
+       ioat_set_tcp_copy_break(device);
+
        dma_async_device_register(&device->common);
 
+       if (device->version != IOAT_VER_3_0) {
+               INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+               schedule_delayed_work(&device->work,
+                                     WATCHDOG_DELAY);
+       }
+
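(The watchdog is an ordinary delayed work item: probe arms it for pre-3.0 hardware and, per the hunk below, remove cancels it. Assuming ioat_dma_chan_watchdog() re-arms itself, the lifecycle presumably looks like this sketch:

    static void ioat_dma_chan_watchdog(struct work_struct *work)
    {
            struct ioatdma_device *device =
                    container_of(work, struct ioatdma_device, work.work);

            /* ... scan channels for stalled completions ... */

            /* re-arm for the next WATCHDOG_DELAY period */
            schedule_delayed_work(&device->work, WATCHDOG_DELAY);
    }
)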
        return device;
 
 err_self_test:
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device)
        pci_release_regions(device->pdev);
        pci_disable_device(device->pdev);
 
+       if (device->version != IOAT_VER_3_0) {
+               cancel_delayed_work(&device->work);
+       }
+
        list_for_each_entry_safe(chan, _chan,
                                 &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);
index f2c7fedbf009b545c72e13fe69cfae54117ed4b9..a3306d0e1372a44b2950bc06d220d9b716f0bb00 100644 (file)
@@ -27,8 +27,9 @@
 #include <linux/dmapool.h>
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
+#include <net/tcp.h>
 
-#define IOAT_DMA_VERSION  "2.04"
+#define IOAT_DMA_VERSION  "3.30"
 
 enum ioat_interrupt {
        none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {
 
 #define IOAT_LOW_COMPLETION_MASK       0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU           ~0
+#define IOAT_WATCHDOG_PERIOD           (2 * HZ)
 
 
 /**
@@ -62,6 +64,7 @@ struct ioatdma_device {
        struct dma_device common;
        u8 version;
        enum ioat_interrupt irq_mode;
+       struct delayed_work work;
        struct msix_entry msix_entries[4];
        struct ioat_dma_chan *idx[4];
 };
@@ -75,6 +78,7 @@ struct ioat_dma_chan {
 
        dma_cookie_t completed_cookie;
        unsigned long last_completion;
+       unsigned long last_completion_time;
 
        size_t xfercap; /* XFERCAP register value expanded out */
 
@@ -82,6 +86,10 @@ struct ioat_dma_chan {
        spinlock_t desc_lock;
        struct list_head free_desc;
        struct list_head used_desc;
+       unsigned long watchdog_completion;
+       int watchdog_tcp_cookie;
+       u32 watchdog_last_tcp_cookie;
+       struct delayed_work work;
 
        int pending;
        int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
                        u32 high;
                };
        } *completion_virt;
+       unsigned long last_compl_desc_addr_hw;
        struct tasklet_struct cleanup_task;
 };
 
@@ -121,17 +130,34 @@ struct ioat_desc_sw {
        struct dma_async_tx_descriptor async_tx;
 };
 
+static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
+{
+       #ifdef CONFIG_NET_DMA
+       switch (dev->version) {
+       case IOAT_VER_1_2:
+       case IOAT_VER_3_0:
+               sysctl_tcp_dma_copybreak = 4096;
+               break;
+       case IOAT_VER_2_0:
+               sysctl_tcp_dma_copybreak = 2048;
+               break;
+       }
+       #endif
+}
+
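(sysctl_tcp_dma_copybreak, declared in net/tcp.h and hence the new include above, is the threshold below which TCP receive copies stay on the CPU instead of being offloaded; the per-version values reflect where each engine's offload starts to win. A simplified, hypothetical sketch of how the knob is consumed on the receive path:

    if (copy_len < sysctl_tcp_dma_copybreak) {
            /* small payload: a plain CPU copy is cheaper */
    } else {
            /* large payload: hand the copy to the DMA engine */
    }
)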
 #if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                                      void __iomem *iobase);
 void ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 #else
 #define ioat_dma_probe(pdev, iobase)    NULL
 #define ioat_dma_remove(device)         do { } while (0)
 #define ioat_dca_init(pdev, iobase)    NULL
 #define ioat2_dca_init(pdev, iobase)   NULL
+#define ioat3_dca_init(pdev, iobase)   NULL
 #endif
 
 #endif /* IOATDMA_H */
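(With the stub macros above, call sites need no #ifdef when the driver is not configured. A hedged sketch of a caller; the local variable is illustrative, not the driver's actual code:

    struct dca_provider *dca = ioat3_dca_init(pdev, iobase);
    if (!dca)
            dev_dbg(&pdev->dev, "DCA unavailable or not configured\n");
)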
index dd470fa91d86a4e10d8187e63d095125fb5a8526..f1ae2c776f7487b40e695395e3562679f420b29e 100644 (file)
@@ -35,6 +35,7 @@
 #define IOAT_PCI_SID            0x8086
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
+#define IOAT_VER_3_0            0x30    /* Version 3.0 */
 
 struct ioat_dma_descriptor {
        uint32_t        size;
index 9832d7ebd931a0a76dafc6d66948b37e4a16da52..827cb503cac6979a0fa98e3fbd25a113e14fda27 100644 (file)
 #define IOAT_PCI_DMACTRL_DMA_EN                        0x00000001
 #define IOAT_PCI_DMACTRL_MSI_EN                        0x00000002
 
+#define IOAT_PCI_DEVICE_ID_OFFSET              0x02
+#define IOAT_PCI_DMAUNCERRSTS_OFFSET           0x148
+#define IOAT_PCI_CHANERRMASK_INT_OFFSET                0x184
+
 /* MMIO Device Registers */
 #define IOAT_CHANCNT_OFFSET                    0x00    /*  8-bit */
 
 #define IOAT_DCA_GREQID_VALID       0x20000000
 #define IOAT_DCA_GREQID_LASTID      0x80000000
 
+#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
+#define IOAT3_CSI_CAPABILITY_PREFETCH    0x1
+
+#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
+#define IOAT3_PCI_CAPABILITY_MEMWR  0x1
+
+#define IOAT3_CSI_CONTROL_OFFSET    0x0C
+#define IOAT3_CSI_CONTROL_PREFETCH  0x1
+
+#define IOAT3_PCI_CONTROL_OFFSET    0x0E
+#define IOAT3_PCI_CONTROL_MEMWR     0x1
+
+#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
+#define IOAT3_APICID_TAG_MAP_OFFSET_LOW  0x10
+#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
 
+#define IOAT3_DCA_GREQID_OFFSET     0x02
 
 #define IOAT1_CHAINADDR_OFFSET         0x0C    /* 64-bit Descriptor Chain Address Register */
 #define IOAT2_CHAINADDR_OFFSET         0x10    /* 64-bit Descriptor Chain Address Register */
index 0ec0f431e6a1d4bc30a179850ddadfbcec2d1ff3..85bfeba4d85eab01eb9e2c7b339eeccd674e2b0b 100644 (file)
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
                        struct device *dev =
                                &iop_chan->device->pdev->dev;
                        u32 len = unmap->unmap_len;
-                       u32 src_cnt = unmap->unmap_src_cnt;
-                       dma_addr_t addr = iop_desc_get_dest_addr(unmap,
-                               iop_chan);
-
-                       dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-                       while (src_cnt--) {
-                               addr = iop_desc_get_src_addr(unmap,
-                                                       iop_chan,
-                                                       src_cnt);
-                               dma_unmap_page(dev, addr, len,
-                                       DMA_TO_DEVICE);
+                       enum dma_ctrl_flags flags = desc->async_tx.flags;
+                       u32 src_cnt;
+                       dma_addr_t addr;
+
+                       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+                               addr = iop_desc_get_dest_addr(unmap, iop_chan);
+                               dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+                       }
+
+                       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+                               src_cnt = unmap->unmap_src_cnt;
+                               while (src_cnt--) {
+                                       addr = iop_desc_get_src_addr(unmap,
+                                                                    iop_chan,
+                                                                    src_cnt);
+                                       dma_unmap_page(dev, addr, len,
+                                                      DMA_TO_DEVICE);
+                               }
                        }
                        desc->group_head = NULL;
                }
@@ -366,8 +373,8 @@ retry:
        if (!retry++)
                goto retry;
 
-       /* try to free some slots if the allocation fails */
-       tasklet_schedule(&iop_chan->irq_tasklet);
+       /* perform direct reclaim if the allocation fails */
+       __iop_adma_slot_cleanup(iop_chan);
 
        return NULL;
 }
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
-/* returns the number of allocated descriptors */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
+/**
+ * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: allocate descriptor resources for this channel
+ * @client: current client requesting the channel be ready for requests
+ *
+ * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
+ * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
+ * greater than 2x the number of slots needed to satisfy a device->max_xor
+ * request.
+ */
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
+                                        struct dma_client *client)
 {
        char *hw_desc;
        int idx;
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+       if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
                err = -ENODEV;
                goto out;
        }
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+       if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
                err = -ENODEV;
                goto out;
        }
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
        spin_unlock_bh(&iop_chan->lock);
 }
 
+MODULE_ALIAS("platform:iop-adma");
+
 static struct platform_driver iop_adma_driver = {
        .probe          = iop_adma_probe,
        .remove         = iop_adma_remove,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644 (file)
index 0000000..a4e4494
--- /dev/null
@@ -0,0 +1,1375 @@
+/*
+ * offload engine driver for the Marvell XOR engine
+ * Copyright (C) 2007, 2008, Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <asm/plat-orion/mv_xor.h>
+#include "mv_xor.h"
+
+static void mv_xor_issue_pending(struct dma_chan *chan);
+
+#define to_mv_xor_chan(chan)           \
+       container_of(chan, struct mv_xor_chan, common)
+
+#define to_mv_xor_device(dev)          \
+       container_of(dev, struct mv_xor_device, common)
+
+#define to_mv_xor_slot(tx)             \
+       container_of(tx, struct mv_xor_desc_slot, async_tx)
+
+static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+       hw_desc->status = (1 << 31);
+       hw_desc->phy_next_desc = 0;
+       hw_desc->desc_command = (1 << 31);
+}
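(Both (1 << 31) writes are magic numbers; a hedged reading, based on the names later kernels give these bits:

    hw_desc->status = (1 << 31);       /* descriptor owned by the XOR engine */
    hw_desc->desc_command = (1 << 31); /* raise an end-of-descriptor interrupt */
)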
+
+static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       return hw_desc->phy_dest_addr;
+}
+
+static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
+                               int src_idx)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       return hw_desc->phy_src_addr[src_idx];
+}
+
+static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
+                                  u32 byte_count)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       hw_desc->byte_count = byte_count;
+}
+
+static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
+                                 u32 next_desc_addr)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       BUG_ON(hw_desc->phy_next_desc);
+       hw_desc->phy_next_desc = next_desc_addr;
+}
+
+static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       hw_desc->phy_next_desc = 0;
+}
+
+static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
+{
+       desc->value = val;
+}
+
+static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
+                                 dma_addr_t addr)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       hw_desc->phy_dest_addr = addr;
+}
+
+static int mv_chan_memset_slot_count(size_t len)
+{
+       return 1;
+}
+
+#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
+
+static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
+                                int index, dma_addr_t addr)
+{
+       struct mv_xor_desc *hw_desc = desc->hw_desc;
+       hw_desc->phy_src_addr[index] = addr;
+       if (desc->type == DMA_XOR)
+               hw_desc->desc_command |= (1 << index);
+}
+
+static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
+{
+       return __raw_readl(XOR_CURR_DESC(chan));
+}
+
+static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
+                                       u32 next_desc_addr)
+{
+       __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+}
+
+static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
+{
+       __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
+}
+
+static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
+{
+       __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
+}
+
+static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
+{
+       __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
+       __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
+}
+
+static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
+{
+       u32 val = __raw_readl(XOR_INTR_MASK(chan));
+       val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
+       __raw_writel(val, XOR_INTR_MASK(chan));
+}
+
+static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
+{
+       u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+       intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
+       return intr_cause;
+}
+
+static int mv_is_err_intr(u32 intr_cause)
+{
+       if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
+               return 1;
+
+       return 0;
+}
+
+static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
+{
+       u32 val = (1 << (1 + (chan->idx * 16)));
+       dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+       __raw_writel(val, XOR_INTR_CAUSE(chan));
+}
+
+static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
+{
+       u32 val = 0xFFFF0000 >> (chan->idx * 16);
+       __raw_writel(val, XOR_INTR_CAUSE(chan));
+}
+
+static int mv_can_chain(struct mv_xor_desc_slot *desc)
+{
+       struct mv_xor_desc_slot *chain_old_tail = list_entry(
+               desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
+
+       if (chain_old_tail->type != desc->type)
+               return 0;
+       if (desc->type == DMA_MEMSET)
+               return 0;
+
+       return 1;
+}
+
+static void mv_set_mode(struct mv_xor_chan *chan,
+                              enum dma_transaction_type type)
+{
+       u32 op_mode;
+       u32 config = __raw_readl(XOR_CONFIG(chan));
+
+       switch (type) {
+       case DMA_XOR:
+               op_mode = XOR_OPERATION_MODE_XOR;
+               break;
+       case DMA_MEMCPY:
+               op_mode = XOR_OPERATION_MODE_MEMCPY;
+               break;
+       case DMA_MEMSET:
+               op_mode = XOR_OPERATION_MODE_MEMSET;
+               break;
+       default:
+               dev_printk(KERN_ERR, chan->device->common.dev,
+                          "error: unsupported operation %d.\n",
+                          type);
+               BUG();
+               return;
+       }
+
+       config &= ~0x7;
+       config |= op_mode;
+       __raw_writel(config, XOR_CONFIG(chan));
+       chan->current_type = type;
+}
+
+static void mv_chan_activate(struct mv_xor_chan *chan)
+{
+       u32 activation;
+
+       dev_dbg(chan->device->common.dev, " activate chan.\n");
+       activation = __raw_readl(XOR_ACTIVATION(chan));
+       activation |= 0x1;
+       __raw_writel(activation, XOR_ACTIVATION(chan));
+}
+
+static char mv_chan_is_busy(struct mv_xor_chan *chan)
+{
+       u32 state = __raw_readl(XOR_ACTIVATION(chan));
+
+       state = (state >> 4) & 0x3;
+
+       return (state == 1) ? 1 : 0;
+}
+
+static int mv_chan_xor_slot_count(size_t len, int src_cnt)
+{
+       return 1;
+}
+
+/**
+ * mv_xor_free_slots - flags descriptor slots for reuse
+ * @mv_chan: channel owning the slot
+ * @slot: Slot to free
+ * Caller must hold &mv_chan->lock while calling this function
+ */
+static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
+                             struct mv_xor_desc_slot *slot)
+{
+       dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+               __func__, __LINE__, slot);
+
+       slot->slots_per_op = 0;
+}
+
+/*
+ * mv_xor_start_new_chain - program the engine to operate on a new chain
+ * headed by sw_desc
+ * Caller must hold &mv_chan->lock while calling this function
+ */
+static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
+                                  struct mv_xor_desc_slot *sw_desc)
+{
+       dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+               __func__, __LINE__, sw_desc);
+       if (sw_desc->type != mv_chan->current_type)
+               mv_set_mode(mv_chan, sw_desc->type);
+
+       if (sw_desc->type == DMA_MEMSET) {
+               /* memset requests are programmed directly into the engine
+                * registers; no descriptors are used.
+                */
+               struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
+               mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
+               mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
+               mv_chan_set_value(mv_chan, sw_desc->value);
+       } else {
+               /* set the hardware chain */
+               mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+       }
+       mv_chan->pending += sw_desc->slot_cnt;
+       mv_xor_issue_pending(&mv_chan->common);
+}
+
+static dma_cookie_t
+mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
+       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
+{
+       BUG_ON(desc->async_tx.cookie < 0);
+
+       if (desc->async_tx.cookie > 0) {
+               cookie = desc->async_tx.cookie;
+
+               /* call the callback (must not sleep or submit new
+                * operations to this channel)
+                */
+               if (desc->async_tx.callback)
+                       desc->async_tx.callback(
+                               desc->async_tx.callback_param);
+
+               /* unmap dma addresses
+                * (unmap_single vs unmap_page?)
+                */
+               if (desc->group_head && desc->unmap_len) {
+                       struct mv_xor_desc_slot *unmap = desc->group_head;
+                       struct device *dev =
+                               &mv_chan->device->pdev->dev;
+                       u32 len = unmap->unmap_len;
+                       enum dma_ctrl_flags flags = desc->async_tx.flags;
+                       u32 src_cnt;
+                       dma_addr_t addr;
+
+                       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+                               addr = mv_desc_get_dest_addr(unmap);
+                               dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+                       }
+
+                       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+                               src_cnt = unmap->unmap_src_cnt;
+                               while (src_cnt--) {
+                                       addr = mv_desc_get_src_addr(unmap,
+                                                                   src_cnt);
+                                       dma_unmap_page(dev, addr, len,
+                                                      DMA_TO_DEVICE);
+                               }
+                       }
+                       desc->group_head = NULL;
+               }
+       }
+
+       /* run dependent operations */
+       async_tx_run_dependencies(&desc->async_tx);
+
+       return cookie;
+}
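(The DMA_COMPL_SKIP_{SRC,DEST}_UNMAP checks let a client that manages its own DMA mappings suppress the automatic unmap above. A hedged client-side sketch; dest/srcs/len are mappings the caller promises to keep alive:

    struct dma_async_tx_descriptor *tx;

    tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt, len,
                                           DMA_COMPL_SKIP_DEST_UNMAP |
                                           DMA_COMPL_SKIP_SRC_UNMAP);
)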
+
+static int
+mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
+{
+       struct mv_xor_desc_slot *iter, *_iter;
+
+       dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+       list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
+                                completed_node) {
+
+               if (async_tx_test_ack(&iter->async_tx)) {
+                       list_del(&iter->completed_node);
+                       mv_xor_free_slots(mv_chan, iter);
+               }
+       }
+       return 0;
+}
+
+static int
+mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
+       struct mv_xor_chan *mv_chan)
+{
+       dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+               __func__, __LINE__, desc, desc->async_tx.flags);
+       list_del(&desc->chain_node);
+       /* the client is allowed to attach dependent operations
+        * until 'ack' is set
+        */
+       if (!async_tx_test_ack(&desc->async_tx)) {
+               /* move this slot to the completed_slots */
+               list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
+               return 0;
+       }
+
+       mv_xor_free_slots(mv_chan, desc);
+       return 0;
+}
+
+static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+{
+       struct mv_xor_desc_slot *iter, *_iter;
+       dma_cookie_t cookie = 0;
+       int busy = mv_chan_is_busy(mv_chan);
+       u32 current_desc = mv_chan_get_current_desc(mv_chan);
+       int seen_current = 0;
+
+       dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+       dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+       mv_xor_clean_completed_slots(mv_chan);
+
+       /* free completed slots from the chain starting with
+        * the oldest descriptor
+        */
+
+       list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+                                       chain_node) {
+               prefetch(_iter);
+               prefetch(&_iter->async_tx);
+
+               /* do not advance past the current descriptor loaded into the
+                * hardware channel, subsequent descriptors are either in
+                * process or have not been submitted
+                */
+               if (seen_current)
+                       break;
+
+               /* stop the search if we reach the current descriptor and the
+                * channel is busy
+                */
+               if (iter->async_tx.phys == current_desc) {
+                       seen_current = 1;
+                       if (busy)
+                               break;
+               }
+
+               cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
+
+               if (mv_xor_clean_slot(iter, mv_chan))
+                       break;
+       }
+
+       if ((busy == 0) && !list_empty(&mv_chan->chain)) {
+               struct mv_xor_desc_slot *chain_head;
+               chain_head = list_entry(mv_chan->chain.next,
+                                       struct mv_xor_desc_slot,
+                                       chain_node);
+
+               mv_xor_start_new_chain(mv_chan, chain_head);
+       }
+
+       if (cookie > 0)
+               mv_chan->completed_cookie = cookie;
+}
+
+static void
+mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+{
+       spin_lock_bh(&mv_chan->lock);
+       __mv_xor_slot_cleanup(mv_chan);
+       spin_unlock_bh(&mv_chan->lock);
+}
+
+static void mv_xor_tasklet(unsigned long data)
+{
+       struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+       __mv_xor_slot_cleanup(chan);
+}
+
+static struct mv_xor_desc_slot *
+mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
+                   int slots_per_op)
+{
+       struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
+       LIST_HEAD(chain);
+       int slots_found, retry = 0;
+
+       /* start search from the last allocated descriptor
+        * if a contiguous allocation cannot be found start searching
+        * from the beginning of the list
+        */
+retry:
+       slots_found = 0;
+       if (retry == 0)
+               iter = mv_chan->last_used;
+       else
+               iter = list_entry(&mv_chan->all_slots,
+                       struct mv_xor_desc_slot,
+                       slot_node);
+
+       list_for_each_entry_safe_continue(
+               iter, _iter, &mv_chan->all_slots, slot_node) {
+               prefetch(_iter);
+               prefetch(&_iter->async_tx);
+               if (iter->slots_per_op) {
+                       /* give up after finding the first busy slot
+                        * on the second pass through the list
+                        */
+                       if (retry)
+                               break;
+
+                       slots_found = 0;
+                       continue;
+               }
+
+               /* start the allocation if the slot is correctly aligned */
+               if (!slots_found++)
+                       alloc_start = iter;
+
+               if (slots_found == num_slots) {
+                       struct mv_xor_desc_slot *alloc_tail = NULL;
+                       struct mv_xor_desc_slot *last_used = NULL;
+                       iter = alloc_start;
+                       while (num_slots) {
+                               int i;
+
+                               /* pre-ack all but the last descriptor */
+                               async_tx_ack(&iter->async_tx);
+
+                               list_add_tail(&iter->chain_node, &chain);
+                               alloc_tail = iter;
+                               iter->async_tx.cookie = 0;
+                               iter->slot_cnt = num_slots;
+                               iter->xor_check_result = NULL;
+                               for (i = 0; i < slots_per_op; i++) {
+                                       iter->slots_per_op = slots_per_op - i;
+                                       last_used = iter;
+                                       iter = list_entry(iter->slot_node.next,
+                                               struct mv_xor_desc_slot,
+                                               slot_node);
+                               }
+                               num_slots -= slots_per_op;
+                       }
+                       alloc_tail->group_head = alloc_start;
+                       alloc_tail->async_tx.cookie = -EBUSY;
+                       list_splice(&chain, &alloc_tail->async_tx.tx_list);
+                       mv_chan->last_used = last_used;
+                       mv_desc_clear_next_desc(alloc_start);
+                       mv_desc_clear_next_desc(alloc_tail);
+                       return alloc_tail;
+               }
+       }
+       if (!retry++)
+               goto retry;
+
+       /* try to free some slots if the allocation fails */
+       tasklet_schedule(&mv_chan->irq_tasklet);
+
+       return NULL;
+}
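(Note the contrast with the iop-adma hunk earlier in this patch, which replaces the same allocation-failure fallback with direct reclaim; a hedged sketch of that alternative applied here, since callers already hold mv_chan->lock:

    /* reclaim completed slots inline instead of deferring to the tasklet */
    __mv_xor_slot_cleanup(mv_chan);
)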
+
+static dma_cookie_t
+mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
+                     struct mv_xor_desc_slot *desc)
+{
+       dma_cookie_t cookie = mv_chan->common.cookie;
+
+       if (++cookie < 0)
+               cookie = 1;
+       mv_chan->common.cookie = desc->async_tx.cookie = cookie;
+       return cookie;
+}
+
+/************************ DMA engine API functions ****************************/
+static dma_cookie_t
+mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
+       struct mv_xor_desc_slot *grp_start, *old_chain_tail;
+       dma_cookie_t cookie;
+       int new_hw_chain = 1;
+
+       dev_dbg(mv_chan->device->common.dev,
+               "%s sw_desc %p: async_tx %p\n",
+               __func__, sw_desc, &sw_desc->async_tx);
+
+       grp_start = sw_desc->group_head;
+
+       spin_lock_bh(&mv_chan->lock);
+       cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
+
+       if (list_empty(&mv_chan->chain))
+               list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
+       else {
+               new_hw_chain = 0;
+
+               old_chain_tail = list_entry(mv_chan->chain.prev,
+                                           struct mv_xor_desc_slot,
+                                           chain_node);
+               list_splice_init(&grp_start->async_tx.tx_list,
+                                &old_chain_tail->chain_node);
+
+               if (!mv_can_chain(grp_start))
+                       goto submit_done;
+
+               dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+                       old_chain_tail->async_tx.phys);
+
+               /* fix up the hardware chain */
+               mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+
+               /* if the channel is not busy */
+               if (!mv_chan_is_busy(mv_chan)) {
+                       u32 current_desc = mv_chan_get_current_desc(mv_chan);
+                       /*
+                        * and the current desc is the end of the chain before
+                        * the append, then we need to start the channel
+                        */
+                       if (current_desc == old_chain_tail->async_tx.phys)
+                               new_hw_chain = 1;
+               }
+       }
+
+       if (new_hw_chain)
+               mv_xor_start_new_chain(mv_chan, grp_start);
+
+submit_done:
+       spin_unlock_bh(&mv_chan->lock);
+
+       return cookie;
+}
+
+/* returns the number of allocated descriptors */
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
+                                      struct dma_client *client)
+{
+       char *hw_desc;
+       int idx;
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+       struct mv_xor_desc_slot *slot = NULL;
+       struct mv_xor_platform_data *plat_data =
+               mv_chan->device->pdev->dev.platform_data;
+       int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+
+       /* Allocate descriptor slots */
+       idx = mv_chan->slots_allocated;
+       while (idx < num_descs_in_pool) {
+               slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+               if (!slot) {
+                       printk(KERN_INFO "MV XOR Channel only initialized"
+                               " %d descriptor slots\n", idx);
+                       break;
+               }
+               hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+               slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+
+               dma_async_tx_descriptor_init(&slot->async_tx, chan);
+               slot->async_tx.tx_submit = mv_xor_tx_submit;
+               INIT_LIST_HEAD(&slot->chain_node);
+               INIT_LIST_HEAD(&slot->slot_node);
+               INIT_LIST_HEAD(&slot->async_tx.tx_list);
+               hw_desc = (char *) mv_chan->device->dma_desc_pool;
+               slot->async_tx.phys =
+                       (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+               slot->idx = idx++;
+
+               spin_lock_bh(&mv_chan->lock);
+               mv_chan->slots_allocated = idx;
+               list_add_tail(&slot->slot_node, &mv_chan->all_slots);
+               spin_unlock_bh(&mv_chan->lock);
+       }
+
+       if (mv_chan->slots_allocated && !mv_chan->last_used)
+               mv_chan->last_used = list_entry(mv_chan->all_slots.next,
+                                       struct mv_xor_desc_slot,
+                                       slot_node);
+
+       dev_dbg(mv_chan->device->common.dev,
+               "allocated %d descriptor slots last_used: %p\n",
+               mv_chan->slots_allocated, mv_chan->last_used);
+
+       return mv_chan->slots_allocated ? : -ENOMEM;
+}
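(The `? :` with an omitted middle operand is a GCC extension that reuses the tested value; the portable equivalent of the return above is:

    return mv_chan->slots_allocated ? mv_chan->slots_allocated : -ENOMEM;
)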
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+               size_t len, unsigned long flags)
+{
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+       struct mv_xor_desc_slot *sw_desc, *grp_start;
+       int slot_cnt;
+
+       dev_dbg(mv_chan->device->common.dev,
+               "%s dest: %x src %x len: %u flags: %ld\n",
+               __func__, dest, src, len, flags);
+       if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
+               return NULL;
+
+       BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+
+       spin_lock_bh(&mv_chan->lock);
+       slot_cnt = mv_chan_memcpy_slot_count(len);
+       sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+       if (sw_desc) {
+               sw_desc->type = DMA_MEMCPY;
+               sw_desc->async_tx.flags = flags;
+               grp_start = sw_desc->group_head;
+               mv_desc_init(grp_start, flags);
+               mv_desc_set_byte_count(grp_start, len);
+               mv_desc_set_dest_addr(sw_desc->group_head, dest);
+               mv_desc_set_src_addr(grp_start, 0, src);
+               sw_desc->unmap_src_cnt = 1;
+               sw_desc->unmap_len = len;
+       }
+       spin_unlock_bh(&mv_chan->lock);
+
+       dev_dbg(mv_chan->device->common.dev,
+               "%s sw_desc %p async_tx %p\n",
+               __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
+
+       return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+                      size_t len, unsigned long flags)
+{
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+       struct mv_xor_desc_slot *sw_desc, *grp_start;
+       int slot_cnt;
+
+       dev_dbg(mv_chan->device->common.dev,
+               "%s dest: %x len: %u flags: %ld\n",
+               __func__, dest, len, flags);
+       if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
+               return NULL;
+
+       BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+
+       spin_lock_bh(&mv_chan->lock);
+       slot_cnt = mv_chan_memset_slot_count(len);
+       sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+       if (sw_desc) {
+               sw_desc->type = DMA_MEMSET;
+               sw_desc->async_tx.flags = flags;
+               grp_start = sw_desc->group_head;
+               mv_desc_init(grp_start, flags);
+               mv_desc_set_byte_count(grp_start, len);
+               mv_desc_set_dest_addr(sw_desc->group_head, dest);
+               mv_desc_set_block_fill_val(grp_start, value);
+               sw_desc->unmap_src_cnt = 1;
+               sw_desc->unmap_len = len;
+       }
+       spin_unlock_bh(&mv_chan->lock);
+       dev_dbg(mv_chan->device->common.dev,
+               "%s sw_desc %p async_tx %p\n",
+               __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
+       return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+                   unsigned int src_cnt, size_t len, unsigned long flags)
+{
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+       struct mv_xor_desc_slot *sw_desc, *grp_start;
+       int slot_cnt;
+
+       if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
+               return NULL;
+
+       BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+
+       dev_dbg(mv_chan->device->common.dev,
+               "%s src_cnt: %d len: dest %x %u flags: %ld\n",
+               __func__, src_cnt, len, dest, flags);
+
+       spin_lock_bh(&mv_chan->lock);
+       slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
+       sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+       if (sw_desc) {
+               sw_desc->type = DMA_XOR;
+               sw_desc->async_tx.flags = flags;
+               grp_start = sw_desc->group_head;
+               mv_desc_init(grp_start, flags);
+               /* the byte count field is the same as in memcpy desc*/
+               mv_desc_set_byte_count(grp_start, len);
+               mv_desc_set_dest_addr(sw_desc->group_head, dest);
+               sw_desc->unmap_src_cnt = src_cnt;
+               sw_desc->unmap_len = len;
+               while (src_cnt--)
+                       mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
+       }
+       spin_unlock_bh(&mv_chan->lock);
+       dev_dbg(mv_chan->device->common.dev,
+               "%s sw_desc %p async_tx %p \n",
+               __func__, sw_desc, &sw_desc->async_tx);
+       return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void mv_xor_free_chan_resources(struct dma_chan *chan)
+{
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+       struct mv_xor_desc_slot *iter, *_iter;
+       int in_use_descs = 0;
+
+       mv_xor_slot_cleanup(mv_chan);
+
+       spin_lock_bh(&mv_chan->lock);
+       list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+                                       chain_node) {
+               in_use_descs++;
+               list_del(&iter->chain_node);
+       }
+       list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
+                                completed_node) {
+               in_use_descs++;
+               list_del(&iter->completed_node);
+       }
+       list_for_each_entry_safe_reverse(
+               iter, _iter, &mv_chan->all_slots, slot_node) {
+               list_del(&iter->slot_node);
+               kfree(iter);
+               mv_chan->slots_allocated--;
+       }
+       mv_chan->last_used = NULL;
+
+       dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+               __func__, mv_chan->slots_allocated);
+       spin_unlock_bh(&mv_chan->lock);
+
+       if (in_use_descs)
+               dev_err(mv_chan->device->common.dev,
+                       "freeing %d in use descriptors!\n", in_use_descs);
+}
+
+/**
+ * mv_xor_is_complete - poll the status of an XOR transaction
+ * @chan: XOR channel handle
+ * @cookie: XOR transaction identifier
+ */
+static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+                                         dma_cookie_t cookie,
+                                         dma_cookie_t *done,
+                                         dma_cookie_t *used)
+{
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+       dma_cookie_t last_used;
+       dma_cookie_t last_complete;
+       enum dma_status ret;
+
+       last_used = chan->cookie;
+       last_complete = mv_chan->completed_cookie;
+       mv_chan->is_complete_cookie = cookie;
+       if (done)
+               *done = last_complete;
+       if (used)
+               *used = last_used;
+
+       ret = dma_async_is_complete(cookie, last_complete, last_used);
+       if (ret == DMA_SUCCESS) {
+               mv_xor_clean_completed_slots(mv_chan);
+               return ret;
+       }
+       mv_xor_slot_cleanup(mv_chan);
+
+       last_used = chan->cookie;
+       last_complete = mv_chan->completed_cookie;
+
+       if (done)
+               *done = last_complete;
+       if (used)
+               *used = last_used;
+
+       return dma_async_is_complete(cookie, last_complete, last_used);
+}
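(A hedged client-side sketch of how a submitted transaction is polled through this era's dmaengine API; device_is_tx_complete maps to mv_xor_is_complete in the probe routine below:

    dma_cookie_t cookie = tx->tx_submit(tx);

    chan->device->device_issue_pending(chan);
    while (chan->device->device_is_tx_complete(chan, cookie, NULL, NULL)
                    != DMA_SUCCESS)
            cpu_relax();
)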
+
+static void mv_dump_xor_regs(struct mv_xor_chan *chan)
+{
+       u32 val;
+
+       val = __raw_readl(XOR_CONFIG(chan));
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "config       0x%08x.\n", val);
+
+       val = __raw_readl(XOR_ACTIVATION(chan));
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "activation   0x%08x.\n", val);
+
+       val = __raw_readl(XOR_INTR_CAUSE(chan));
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "intr cause   0x%08x.\n", val);
+
+       val = __raw_readl(XOR_INTR_MASK(chan));
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "intr mask    0x%08x.\n", val);
+
+       val = __raw_readl(XOR_ERROR_CAUSE(chan));
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "error cause  0x%08x.\n", val);
+
+       val = __raw_readl(XOR_ERROR_ADDR(chan));
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "error addr   0x%08x.\n", val);
+}
+
+static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
+                                        u32 intr_cause)
+{
+       if (intr_cause & (1 << 4)) {
+            dev_dbg(chan->device->common.dev,
+                    "ignore this error\n");
+            return;
+       }
+
+       dev_printk(KERN_ERR, chan->device->common.dev,
+                  "error on chan %d. intr cause 0x%08x.\n",
+                  chan->idx, intr_cause);
+
+       mv_dump_xor_regs(chan);
+       BUG();
+}
+
+static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
+{
+       struct mv_xor_chan *chan = data;
+       u32 intr_cause = mv_chan_get_intr_cause(chan);
+
+       dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+
+       if (mv_is_err_intr(intr_cause))
+               mv_xor_err_interrupt_handler(chan, intr_cause);
+
+       tasklet_schedule(&chan->irq_tasklet);
+
+       mv_xor_device_clear_eoc_cause(chan);
+
+       return IRQ_HANDLED;
+}
+
+static void mv_xor_issue_pending(struct dma_chan *chan)
+{
+       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+
+       if (mv_chan->pending >= MV_XOR_THRESHOLD) {
+               mv_chan->pending = 0;
+               mv_chan_activate(mv_chan);
+       }
+}
+
+/*
+ * Perform a transaction to verify the HW works.
+ */
+#define MV_XOR_TEST_SIZE 2000
+
+static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
+{
+       int i;
+       void *src, *dest;
+       dma_addr_t src_dma, dest_dma;
+       struct dma_chan *dma_chan;
+       dma_cookie_t cookie;
+       struct dma_async_tx_descriptor *tx;
+       int err = 0;
+       struct mv_xor_chan *mv_chan;
+
+       src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+       if (!src)
+               return -ENOMEM;
+
+       dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+       if (!dest) {
+               kfree(src);
+               return -ENOMEM;
+       }
+
+       /* Fill in src buffer */
+       for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+               ((u8 *) src)[i] = (u8)i;
+
+       /* Start copy, using first DMA channel */
+       dma_chan = container_of(device->common.channels.next,
+                               struct dma_chan,
+                               device_node);
+       if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       dest_dma = dma_map_single(dma_chan->device->dev, dest,
+                                 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+
+       src_dma = dma_map_single(dma_chan->device->dev, src,
+                                MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+
+       tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+                                   MV_XOR_TEST_SIZE, 0);
+       cookie = mv_xor_tx_submit(tx);
+       mv_xor_issue_pending(dma_chan);
+       async_tx_ack(tx);
+       msleep(1);
+
+       if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+           DMA_SUCCESS) {
+               dev_printk(KERN_ERR, dma_chan->device->dev,
+                          "Self-test copy timed out, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+       mv_chan = to_mv_xor_chan(dma_chan);
+       dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+                               MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+       if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+               dev_printk(KERN_ERR, dma_chan->device->dev,
+                          "Self-test copy failed compare, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+free_resources:
+       mv_xor_free_chan_resources(dma_chan);
+out:
+       kfree(src);
+       kfree(dest);
+       return err;
+}
+
+#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
+static int __devinit
+mv_xor_xor_self_test(struct mv_xor_device *device)
+{
+       int i, src_idx;
+       struct page *dest;
+       struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
+       dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
+       dma_addr_t dest_dma;
+       struct dma_async_tx_descriptor *tx;
+       struct dma_chan *dma_chan;
+       dma_cookie_t cookie;
+       u8 cmp_byte = 0;
+       u32 cmp_word;
+       int err = 0;
+       struct mv_xor_chan *mv_chan;
+
+       for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+               xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+               if (!xor_srcs[src_idx]) {
+                       while (src_idx--)
+                               __free_page(xor_srcs[src_idx]);
+                       return -ENOMEM;
+               }
+       }
+
+       dest = alloc_page(GFP_KERNEL);
+       if (!dest) {
+               while (src_idx--)
+                       __free_page(xor_srcs[src_idx]);
+               return -ENOMEM;
+       }
+
+       /* Fill in src buffers */
+       for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+               u8 *ptr = page_address(xor_srcs[src_idx]);
+               for (i = 0; i < PAGE_SIZE; i++)
+                       ptr[i] = (1 << src_idx);
+       }
+
+       for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+               cmp_byte ^= (u8) (1 << src_idx);
+
+       cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+               (cmp_byte << 8) | cmp_byte;
+
+       memset(page_address(dest), 0, PAGE_SIZE);
+
+       dma_chan = container_of(device->common.channels.next,
+                               struct dma_chan,
+                               device_node);
+       if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       /* test xor */
+       dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+                               DMA_FROM_DEVICE);
+
+       for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
+               dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+                                          0, PAGE_SIZE, DMA_TO_DEVICE);
+
+       tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+                                MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+
+       cookie = mv_xor_tx_submit(tx);
+       mv_xor_issue_pending(dma_chan);
+       async_tx_ack(tx);
+       msleep(8);
+
+       if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+           DMA_SUCCESS) {
+               dev_printk(KERN_ERR, dma_chan->device->dev,
+                          "Self-test xor timed out, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+       mv_chan = to_mv_xor_chan(dma_chan);
+       dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+                               PAGE_SIZE, DMA_FROM_DEVICE);
+       for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+               u32 *ptr = page_address(dest);
+               if (ptr[i] != cmp_word) {
+                       dev_printk(KERN_ERR, dma_chan->device->dev,
+                                  "Self-test xor failed compare, disabling."
+                                  " index %d, data %x, expected %x\n", i,
+                                  ptr[i], cmp_word);
+                       err = -ENODEV;
+                       goto free_resources;
+               }
+       }
+
+free_resources:
+       mv_xor_free_chan_resources(dma_chan);
+out:
+       src_idx = MV_XOR_NUM_SRC_TEST;
+       while (src_idx--)
+               __free_page(xor_srcs[src_idx]);
+       __free_page(dest);
+       return err;
+}
+
+static int __devexit mv_xor_remove(struct platform_device *dev)
+{
+       struct mv_xor_device *device = platform_get_drvdata(dev);
+       struct dma_chan *chan, *_chan;
+       struct mv_xor_chan *mv_chan;
+       struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+
+       dma_async_device_unregister(&device->common);
+
+       dma_free_coherent(&dev->dev, plat_data->pool_size,
+                       device->dma_desc_pool_virt, device->dma_desc_pool);
+
+       list_for_each_entry_safe(chan, _chan, &device->common.channels,
+                               device_node) {
+               mv_chan = to_mv_xor_chan(chan);
+               list_del(&chan->device_node);
+       }
+
+       return 0;
+}
+
+static int __devinit mv_xor_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       int irq;
+       struct mv_xor_device *adev;
+       struct mv_xor_chan *mv_chan;
+       struct dma_device *dma_dev;
+       struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
+
+       adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+       if (!adev)
+               return -ENOMEM;
+
+       dma_dev = &adev->common;
+
+       /* allocate coherent memory for hardware descriptors
+        * note: writecombine gives slightly better performance, but
+        * requires that we explicitly flush the writes
+        */
+       adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+                                                         plat_data->pool_size,
+                                                         &adev->dma_desc_pool,
+                                                         GFP_KERNEL);
+       if (!adev->dma_desc_pool_virt)
+               return -ENOMEM;
+
+       adev->id = plat_data->hw_id;
+
+       /* discover transaction capabilites from the platform data */
+       dma_dev->cap_mask = plat_data->cap_mask;
+       adev->pdev = pdev;
+       platform_set_drvdata(pdev, adev);
+
+       adev->shared = platform_get_drvdata(plat_data->shared);
+
+       INIT_LIST_HEAD(&dma_dev->channels);
+
+       /* set base routines */
+       dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
+       dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
+       dma_dev->device_is_tx_complete = mv_xor_is_complete;
+       dma_dev->device_issue_pending = mv_xor_issue_pending;
+       dma_dev->dev = &pdev->dev;
+
+       /* set prep routines based on capability */
+       if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
+               dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+       if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
+               dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
+       if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+               dma_dev->max_xor = 8;                  ;
+               dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
+       }
+
+       mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+       if (!mv_chan) {
+               ret = -ENOMEM;
+               goto err_free_dma;
+       }
+       mv_chan->device = adev;
+       mv_chan->idx = plat_data->hw_id;
+       mv_chan->mmr_base = adev->shared->xor_base;
+
+       if (!mv_chan->mmr_base) {
+               ret = -ENOMEM;
+               goto err_free_dma;
+       }
+       tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
+                    mv_chan);
+
+       /* clear errors before enabling interrupts */
+       mv_xor_device_clear_err_status(mv_chan);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               ret = irq;
+               goto err_free_dma;
+       }
+       ret = devm_request_irq(&pdev->dev, irq,
+                              mv_xor_interrupt_handler,
+                              0, dev_name(&pdev->dev), mv_chan);
+       if (ret)
+               goto err_free_dma;
+
+       mv_chan_unmask_interrupts(mv_chan);
+
+       mv_set_mode(mv_chan, DMA_MEMCPY);
+
+       spin_lock_init(&mv_chan->lock);
+       INIT_LIST_HEAD(&mv_chan->chain);
+       INIT_LIST_HEAD(&mv_chan->completed_slots);
+       INIT_LIST_HEAD(&mv_chan->all_slots);
+       INIT_RCU_HEAD(&mv_chan->common.rcu);
+       mv_chan->common.device = dma_dev;
+
+       list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+
+       if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
+               ret = mv_xor_memcpy_self_test(adev);
+               dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
+               if (ret)
+                       goto err_free_dma;
+       }
+
+       if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+               ret = mv_xor_xor_self_test(adev);
+               dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
+               if (ret)
+                       goto err_free_dma;
+       }
+
+       dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+         "( %s%s%s%s)\n",
+         dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+         dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
+         dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+         dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+
+       dma_async_device_register(dma_dev);
+       goto out;
+
+ err_free_dma:
+       dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
+                       adev->dma_desc_pool_virt, adev->dma_desc_pool);
+ out:
+       return ret;
+}
+
+static void
+mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+                        struct mbus_dram_target_info *dram)
+{
+       void __iomem *base = msp->xor_base;
+       u32 win_enable = 0;
+       int i;
+
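+       /* start from a clean slate: disable and clear all eight address
+        * decoding windows (only the first four have remap registers)
+        */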
+       for (i = 0; i < 8; i++) {
+               writel(0, base + WINDOW_BASE(i));
+               writel(0, base + WINDOW_SIZE(i));
+               if (i < 4)
+                       writel(0, base + WINDOW_REMAP_HIGH(i));
+       }
+
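+       /* then open one window per DRAM chip select, mirroring the CPU's
+        * view of memory (target id, attributes, base and size)
+        */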
+       for (i = 0; i < dram->num_cs; i++) {
+               struct mbus_dram_window *cs = dram->cs + i;
+
+               writel((cs->base & 0xffff0000) |
+                      (cs->mbus_attr << 8) |
+                      dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+               writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+               win_enable |= (1 << i);
+               win_enable |= 3 << (16 + (2 * i));
+       }
+
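+       /* bit i enables window i; the two bits at 16 + 2i set its access
+        * protection (0x3 = full read/write access).  Both units' BAR
+        * enable registers get the same mapping.
+        */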
+       writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+       writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+}
+
+static struct platform_driver mv_xor_driver = {
+       .probe          = mv_xor_probe,
+       .remove         = mv_xor_remove,
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = MV_XOR_NAME,
+       },
+};
+
+static int mv_xor_shared_probe(struct platform_device *pdev)
+{
+       struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
+       struct mv_xor_shared_private *msp;
+       struct resource *res;
+
+       dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+
+       msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
+       if (!msp)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+                                    res->end - res->start + 1);
+       if (!msp->xor_base)
+               return -EBUSY;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               return -ENODEV;
+
+       msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+                                         res->end - res->start + 1);
+       if (!msp->xor_high_base)
+               return -EBUSY;
+
+       platform_set_drvdata(pdev, msp);
+
+       /*
+        * (Re-)program MBUS remapping windows if we are asked to.
+        */
+       if (msd != NULL && msd->dram != NULL)
+               mv_xor_conf_mbus_windows(msp, msd->dram);
+
+       return 0;
+}
+
+static int mv_xor_shared_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static struct platform_driver mv_xor_shared_driver = {
+       .probe          = mv_xor_shared_probe,
+       .remove         = mv_xor_shared_remove,
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = MV_XOR_SHARED_NAME,
+       },
+};
+
+static int __init mv_xor_init(void)
+{
+       int rc;
+
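+       /* register the shared driver first: each channel's probe expects
+        * the shared device's drvdata to already be set up
+        */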
+       rc = platform_driver_register(&mv_xor_shared_driver);
+       if (!rc) {
+               rc = platform_driver_register(&mv_xor_driver);
+               if (rc)
+                       platform_driver_unregister(&mv_xor_shared_driver);
+       }
+       return rc;
+}
+module_init(mv_xor_init);
+
+/* it's currently unsafe to unload this module */
+#if 0
+static void __exit mv_xor_exit(void)
+{
+       platform_driver_unregister(&mv_xor_driver);
+       platform_driver_unregister(&mv_xor_shared_driver);
+       return;
+}
+
+module_exit(mv_xor_exit);
+#endif
+
+MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
+MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644 (file)
index 0000000..06cafe1
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2007, 2008, Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MV_XOR_H
+#define MV_XOR_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
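+/* USE_TIMER enables the timer-assisted cleanup fields declared below */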
+#define USE_TIMER
+#define MV_XOR_SLOT_SIZE               64
+#define MV_XOR_THRESHOLD               1
+
+#define XOR_OPERATION_MODE_XOR         0
+#define XOR_OPERATION_MODE_MEMCPY      2
+#define XOR_OPERATION_MODE_MEMSET      4
+
+#define XOR_CURR_DESC(chan)    (chan->mmr_base + 0x210 + (chan->idx * 4))
+#define XOR_NEXT_DESC(chan)    (chan->mmr_base + 0x200 + (chan->idx * 4))
+#define XOR_BYTE_COUNT(chan)   (chan->mmr_base + 0x220 + (chan->idx * 4))
+#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
+#define XOR_BLOCK_SIZE(chan)   (chan->mmr_base + 0x2C0 + (chan->idx * 4))
+#define XOR_INIT_VALUE_LOW(chan)       (chan->mmr_base + 0x2E0)
+#define XOR_INIT_VALUE_HIGH(chan)      (chan->mmr_base + 0x2E4)
+
+#define XOR_CONFIG(chan)       (chan->mmr_base + 0x10 + (chan->idx * 4))
+#define XOR_ACTIVATION(chan)   (chan->mmr_base + 0x20 + (chan->idx * 4))
+#define XOR_INTR_CAUSE(chan)   (chan->mmr_base + 0x30)
+#define XOR_INTR_MASK(chan)    (chan->mmr_base + 0x40)
+#define XOR_ERROR_CAUSE(chan)  (chan->mmr_base + 0x50)
+#define XOR_ERROR_ADDR(chan)   (chan->mmr_base + 0x60)
+#define XOR_INTR_MASK_VALUE    0x3F5
+
+#define WINDOW_BASE(w)         (0x250 + ((w) << 2))
+#define WINDOW_SIZE(w)         (0x270 + ((w) << 2))
+#define WINDOW_REMAP_HIGH(w)   (0x290 + ((w) << 2))
+#define WINDOW_BAR_ENABLE(chan)        (0x240 + ((chan) << 2))
+
+struct mv_xor_shared_private {
+       void __iomem    *xor_base;
+       void __iomem    *xor_high_base;
+};
+
+/**
+ * struct mv_xor_device - internal representation of a XOR device
+ * @pdev: Platform device
+ * @id: HW XOR Device selector
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @common: embedded struct dma_device
+ */
+struct mv_xor_device {
+       struct platform_device          *pdev;
+       int                             id;
+       dma_addr_t                      dma_desc_pool;
+       void                            *dma_desc_pool_virt;
+       struct dma_device               common;
+       struct mv_xor_shared_private    *shared;
+};
+
+/**
+ * struct mv_xor_chan - internal representation of a XOR channel
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @lock: serializes enqueue/dequeue operations to the descriptors pool
+ * @mmr_base: memory mapped register base
+ * @idx: the index of the xor channel
+ * @chain: device chain view of the descriptors
+ * @completed_slots: slots completed by HW but still need to be acked
+ * @device: parent device
+ * @common: common dmaengine channel object members
+ * @last_used: place holder for allocation to continue from where it left off
+ * @all_slots: complete domain of slots usable by the channel
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
+ */
+struct mv_xor_chan {
+       int                     pending;
+       dma_cookie_t            completed_cookie;
+       spinlock_t              lock; /* protects the descriptor slot pool */
+       void __iomem            *mmr_base;
+       unsigned int            idx;
+       enum dma_transaction_type       current_type;
+       struct list_head        chain;
+       struct list_head        completed_slots;
+       struct mv_xor_device    *device;
+       struct dma_chan         common;
+       struct mv_xor_desc_slot *last_used;
+       struct list_head        all_slots;
+       int                     slots_allocated;
+       struct tasklet_struct   irq_tasklet;
+#ifdef USE_TIMER
+       unsigned long           cleanup_time;
+       u32                     current_on_last_cleanup;
+       dma_cookie_t            is_complete_cookie;
+#endif
+};
+
+/**
+ * struct mv_xor_desc_slot - software descriptor
+ * @slot_node: node on the mv_xor_chan.all_slots list
+ * @chain_node: node on the mv_xor_chan.chain list
+ * @completed_node: node on the mv_xor_chan.completed_slots list
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @slots_per_op: number of slots per operation
+ * @idx: pool index
+ * @unmap_src_cnt: number of xor sources
+ * @unmap_len: transaction bytecount
+ * @async_tx: support for the async_tx api
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ *     for example transfer lengths larger than the supported hw max
+ * @xor_check_result: result of the zero-sum check
+ * @crc32_result: result of the CRC-32 calculation
+ */
+struct mv_xor_desc_slot {
+       struct list_head        slot_node;
+       struct list_head        chain_node;
+       struct list_head        completed_node;
+       enum dma_transaction_type       type;
+       void                    *hw_desc;
+       struct mv_xor_desc_slot *group_head;
+       u16                     slot_cnt;
+       u16                     slots_per_op;
+       u16                     idx;
+       u16                     unmap_src_cnt;
+       u32                     value;
+       size_t                  unmap_len;
+       struct dma_async_tx_descriptor  async_tx;
+       union {
+               u32             *xor_check_result;
+               u32             *crc32_result;
+       };
+#ifdef USE_TIMER
+       unsigned long           arrival_time;
+       struct timer_list       timeout;
+#endif
+};
+
+/* This structure describes the 64-byte hardware XOR descriptor */
+struct mv_xor_desc {
+       u32 status;             /* descriptor execution status */
+       u32 crc32_result;       /* result of CRC-32 calculation */
+       u32 desc_command;       /* type of operation to be carried out */
+       u32 phy_next_desc;      /* next descriptor address pointer */
+       u32 byte_count;         /* size of src/dst blocks in bytes */
+       u32 phy_dest_addr;      /* destination block address */
+       u32 phy_src_addr[8];    /* source block addresses */
+       u32 reserved0;
+       u32 reserved1;
+};
+
+#define to_mv_sw_desc(addr_hw_desc)            \
+       container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
+
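+/* step (idx) 32-byte units into a slot's hardware descriptor region */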
+#define mv_hw_desc_slot_idx(hw_desc, idx)      \
+       ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
+
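+/* the cap suggests the descriptor's byte count field is 24 bits wide */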
+#define MV_XOR_MIN_BYTE_COUNT  (128)
+#define XOR_MAX_BYTE_COUNT     ((16 * 1024 * 1024) - 1)
+#define MV_XOR_MAX_BYTE_COUNT  XOR_MAX_BYTE_COUNT
+
+#endif
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f43d6d3cf2fa2429d5689bbe05e87cf0f99358e0..426ac5add585c03df6dbdae3c8b2baca175ef043 100644 (file)
@@ -780,7 +780,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
  */
 static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
 {
-       __le64 x;
+       u64 x;
        u64 m = (1ULL << n) - 1;
 
        if (n > 32)
@@ -796,10 +796,10 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
        report += offset >> 3;
        offset &= 7;
 
-       x = get_unaligned((__le64 *)report);
-       x &= cpu_to_le64(~(m << offset));
-       x |= cpu_to_le64(((u64) value) << offset);
-       put_unaligned(x, (__le64 *) report);
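+       /* read-modify-write in CPU byte order: fetch the unaligned
+        * little-endian 64-bit window, clear the n-bit field at 'offset',
+        * then merge in the new value and store it back
+        */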
+       x = get_unaligned_le64(report);
+       x &= ~(m << offset);
+       x |= ((u64)value) << offset;
+       put_unaligned_le64(x, report);
 }
 
 /*
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index 4c2052c658f1465f6a8d89494b03a8783a7ba6e3..16feea014494284e9d165af05f8b47f993557f38 100644 (file)
@@ -89,6 +89,29 @@ static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_de
        return 1;
 }
 
+static int quirk_gyration_remote(struct hid_usage *usage, struct input_dev *input,
+                             unsigned long **bit, int *max)
+{
+       if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
+               return 0;
+
+       set_bit(EV_REP, input->evbit);
+       switch (usage->hid & HID_USAGE) {
+               /* Reported on Gyration MCE Remote */
+               case 0x00d: map_key_clear(KEY_HOME);            break;
+               case 0x024: map_key_clear(KEY_DVD);             break;
+               case 0x025: map_key_clear(KEY_PVR);             break;
+               case 0x046: map_key_clear(KEY_MEDIA);           break;
+               case 0x047: map_key_clear(KEY_MP3);             break;
+               case 0x049: map_key_clear(KEY_CAMERA);          break;
+               case 0x04a: map_key_clear(KEY_VIDEO);           break;
+
+               default:
+                       return 0;
+       }
+       return 1;
+}
+
 static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
                              unsigned long **bit, int *max)
 {
@@ -303,6 +326,9 @@ static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *inp
 #define VENDOR_ID_EZKEY                                0x0518
 #define DEVICE_ID_BTC_8193                     0x0002
 
+#define VENDOR_ID_GYRATION                     0x0c16
+#define DEVICE_ID_GYRATION_REMOTE              0x0002
+
 #define VENDOR_ID_LOGITECH                     0x046d
 #define DEVICE_ID_LOGITECH_RECEIVER            0xc101
 #define DEVICE_ID_S510_RECEIVER                        0xc50c
@@ -337,6 +363,8 @@ static const struct hid_input_blacklist {
 
        { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
 
+       { VENDOR_ID_GYRATION, DEVICE_ID_GYRATION_REMOTE, quirk_gyration_remote },
+
        { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
        { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
        { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
@@ -438,6 +466,18 @@ int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struc
                input_event(input, usage->type, REL_WHEEL, -value);
                return 1;
        }
+
+       /* Gyration MCE remote "Sleep" key */
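+       /* the remote apparently sends no release event for Sleep, so a
+        * full press/release pair is synthesized here */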
+       if (hid->vendor == VENDOR_ID_GYRATION &&
+           hid->product == DEVICE_ID_GYRATION_REMOTE &&
+           (usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
+           (usage->hid & 0xff) == 0x82) {
+               input_event(input, usage->type, usage->code, 1);
+               input_sync(input);
+               input_event(input, usage->type, usage->code, 0);
+               input_sync(input);
+               return 1;
+       }
        return 0;
 }
 
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5c52a20ad3447abd4cd4b94d7bc37f14746ab230..1b2e8dc3398d7bf972243507d695f14e2093aaa0 100644 (file)
@@ -100,6 +100,8 @@ static struct hidinput_key_translation apple_fn_keys[] = {
        { KEY_F2,       KEY_BRIGHTNESSUP,   APPLE_FLAG_FKEY },
        { KEY_F3,       KEY_FN_F5,          APPLE_FLAG_FKEY }, /* Exposé */
        { KEY_F4,       KEY_FN_F4,          APPLE_FLAG_FKEY }, /* Dashboard */
+       { KEY_F5,       KEY_KBDILLUMDOWN,   APPLE_FLAG_FKEY },
+       { KEY_F6,       KEY_KBDILLUMUP,     APPLE_FLAG_FKEY },
        { KEY_F7,       KEY_PREVIOUSSONG,   APPLE_FLAG_FKEY },
        { KEY_F8,       KEY_PLAYPAUSE,      APPLE_FLAG_FKEY },
        { KEY_F9,       KEY_NEXTSONG,       APPLE_FLAG_FKEY },
@@ -612,6 +614,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                case 0x0b6: map_key_clear(KEY_PREVIOUSSONG);    break;
                                case 0x0b7: map_key_clear(KEY_STOPCD);          break;
                                case 0x0b8: map_key_clear(KEY_EJECTCD);         break;
+                               case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT);    break;
 
                                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6b4d4e7e2700ee9135d2729b8c388f7d8c6118..c40f0403edafe1390d5fecb66604e095c8f70057 100644 (file)
@@ -105,6 +105,7 @@ out:
 static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
 {
        unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       /* FIXME: What stops hidraw_table going NULL */
        struct hid_device *dev = hidraw_table[minor]->hid;
        __u8 *buf;
        int ret = 0;
@@ -211,38 +212,43 @@ static int hidraw_release(struct inode * inode, struct file * file)
                        kfree(list->hidraw);
        }
 
+       kfree(list);
+
        return 0;
 }
 
-static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long hidraw_ioctl(struct file *file, unsigned int cmd,
+                                                       unsigned long arg)
 {
+       struct inode *inode = file->f_path.dentry->d_inode;
        unsigned int minor = iminor(inode);
+       long ret = 0;
+       /* FIXME: What stops hidraw_table going NULL */
        struct hidraw *dev = hidraw_table[minor];
        void __user *user_arg = (void __user*) arg;
 
+       lock_kernel();
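+       /* the old ->ioctl entry point ran under the Big Kernel Lock;
+        * unlocked_ioctl does not, so the BKL is taken explicitly above
+        * until the locking below is audited
+        */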
        switch (cmd) {
                case HIDIOCGRDESCSIZE:
                        if (put_user(dev->hid->rsize, (int __user *)arg))
-                               return -EFAULT;
-                       return 0;
+                               ret = -EFAULT;
+                       break;
 
                case HIDIOCGRDESC:
                        {
                                __u32 len;
 
                                if (get_user(len, (int __user *)arg))
-                                       return -EFAULT;
-
-                               if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
-                                       return -EINVAL;
-
-                               if (copy_to_user(user_arg + offsetof(
-                                                               struct hidraw_report_descriptor,
-                                                               value[0]),
-                                                       dev->hid->rdesc,
-                                                       min(dev->hid->rsize, len)))
-                                               return -EFAULT;
-                               return 0;
+                                       ret = -EFAULT;
+                               else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
+                                       ret = -EINVAL;
+                               else if (copy_to_user(user_arg + offsetof(
+                                       struct hidraw_report_descriptor,
+                                       value[0]),
+                                       dev->hid->rdesc,
+                                       min(dev->hid->rsize, len)))
+                                       ret = -EFAULT;
+                               break;
                        }
                case HIDIOCGRAWINFO:
                        {
@@ -252,15 +258,13 @@ static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd
                                dinfo.vendor = dev->hid->vendor;
                                dinfo.product = dev->hid->product;
                                if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
-                                       return -EFAULT;
-
-                               return 0;
+                                       ret = -EFAULT;
+                               break;
                        }
                default:
-                       printk(KERN_EMERG "hidraw: unsupported ioctl() %x\n",
-                                       cmd);
+                       ret = -ENOTTY;
        }
-       return -EINVAL;
+       unlock_kernel();
+       return ret;
 }
 
 static const struct file_operations hidraw_ops = {
@@ -270,7 +274,7 @@ static const struct file_operations hidraw_ops = {
        .poll =         hidraw_poll,
        .open =         hidraw_open,
        .release =      hidraw_release,
-       .ioctl =        hidraw_ioctl,
+       .unlocked_ioctl = hidraw_ioctl,
 };
 
 void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1df832a8fcbc6251f180d25ce809f11c803bcec9..61e78a4369b9e811952bf0e36c450b46129b0923 100644 (file)
 #define USB_DEVICE_ID_APPLE_ALU_ANSI   0x0220
 #define USB_DEVICE_ID_APPLE_ALU_ISO    0x0221
 #define USB_DEVICE_ID_APPLE_ALU_JIS    0x0222
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI    0x0223
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO     0x0224
+#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS     0x0225
 #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI    0x0229
 #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO     0x022a
 #define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS     0x022b
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI  0x022c
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO   0x022d
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS   0x022e
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI   0x0230
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO    0x0231
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS    0x0232
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY   0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY    0x030b
 #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
 #define USB_DEVICE_ID_LD_MACHINETEST   0x2040
 
 #define USB_VENDOR_ID_LOGITECH         0x046d
+#define USB_DEVICE_ID_LOGITECH_LX3     0xc044
+#define USB_DEVICE_ID_LOGITECH_V150    0xc047
 #define USB_DEVICE_ID_LOGITECH_RECEIVER        0xc101
 #define USB_DEVICE_ID_LOGITECH_HARMONY  0xc110
 #define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
 #define USB_DEVICE_ID_S510_RECEIVER_2  0xc517
 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500  0xc512
 #define USB_DEVICE_ID_MX3000_RECEIVER  0xc513
+#define USB_DEVICE_ID_DINOVO_DESKTOP   0xc704
 #define USB_DEVICE_ID_DINOVO_EDGE      0xc714
 #define USB_DEVICE_ID_DINOVO_MINI      0xc71f
 
@@ -443,7 +452,8 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
-       
+
+       { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP, HID_QUIRK_DUPLICATE_USAGES },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
 
@@ -593,6 +603,8 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
+       { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3, HID_QUIRK_INVERT_HWHEEL },
+       { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150, HID_QUIRK_INVERT_HWHEEL },
 
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
@@ -642,6 +654,12 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
        { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
        { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
        { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
        { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
 
@@ -1128,7 +1146,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
                         && rdesc[557] == 0x19
                         && rdesc[559] == 0x29) {
                printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
-               rdesc[284] = rdesc[304] = rdesc[558] = 0x35;
+               rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
                rdesc[352] = 0x36;
                rdesc[286] = rdesc[355] = 0x46;
                rdesc[306] = rdesc[559] = 0x45;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 95cc192bc7af7573087d2ce67e634371d40caf6e..842e9edb888ed889c27c19fd2ca705c3da38010f 100644 (file)
@@ -406,6 +406,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
        uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
        if (!uref_multi)
                return -ENOMEM;
+       lock_kernel();
        uref = &uref_multi->uref;
        if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
                if (copy_from_user(uref_multi, user_arg,
@@ -501,12 +502,15 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
                }
 
 goodreturn:
+               unlock_kernel();
                kfree(uref_multi);
                return 0;
 fault:
+               unlock_kernel();
                kfree(uref_multi);
                return -EFAULT;
 inval:
+               unlock_kernel();
                kfree(uref_multi);
                return -EINVAL;
        }
@@ -540,7 +544,7 @@ static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd,
        return len;
 }
 
-static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct hiddev_list *list = file->private_data;
        struct hiddev *hiddev = list->hiddev;
@@ -555,7 +559,10 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
        struct usbhid_device *usbhid = hid->driver_data;
        void __user *user_arg = (void __user *)arg;
        int i;
+
+       /* Called without the BKL by compat methods, so no BKL is taken */
 
+       /* FIXME: Who or what stops this racing with a disconnect? */
        if (!hiddev->exist)
                return -EIO;
 
@@ -756,8 +763,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
 #ifdef CONFIG_COMPAT
 static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
-       return hiddev_ioctl(inode, file, cmd, (unsigned long)compat_ptr(arg));
+       return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
 }
 #endif
 
@@ -768,7 +774,7 @@ static const struct file_operations hiddev_fops = {
        .poll =         hiddev_poll,
        .open =         hiddev_open,
        .release =      hiddev_release,
-       .ioctl =        hiddev_ioctl,
+       .unlocked_ioctl =       hiddev_ioctl,
        .fasync =       hiddev_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = hiddev_compat_ioctl,
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 3cd46d2e53c1851828b31df4fc85da846381b3b5..0caaafe018438db68b33acc4ac0d695c717d4058 100644 (file)
@@ -43,7 +43,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE(DRIVER_LICENSE);
 
-static unsigned char usb_kbd_keycode[256] = {
+static const unsigned char usb_kbd_keycode[256] = {
          0,  0,  0,  0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
         50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44,  2,  3,
          4,  5,  6,  7,  8,  9, 10, 11, 28,  1, 14, 15, 57, 12, 13, 26,
@@ -233,14 +233,6 @@ static int usb_kbd_probe(struct usb_interface *iface,
        if (!usb_endpoint_is_int_in(endpoint))
                return -ENODEV;
 
-#ifdef CONFIG_USB_HID
-       if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
-                               le16_to_cpu(dev->descriptor.idProduct))
-                       & HID_QUIRK_IGNORE) {
-               return -ENODEV;
-       }
-#endif
-
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
        maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
 
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 703e9d0e871460df091ce77d8cd35697a09424e3..35689ef172cc5240fdb4a7882ccaf13fbe3913df 100644 (file)
@@ -129,14 +129,6 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
        if (!usb_endpoint_is_int_in(endpoint))
                return -ENODEV;
 
-#ifdef CONFIG_USB_HID
-       if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
-                               le16_to_cpu(dev->descriptor.idProduct))
-                       & (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
-               return -ENODEV;
-       }
-#endif
-
        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
        maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
 
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 15b09b89588a033b1e6f967b05f5fe8615286e08..04d9c4d459d00f61e77efcee48ea91797dc0e0a0 100644 (file)
@@ -510,6 +510,7 @@ config BLK_DEV_TRIFLEX
 
 config BLK_DEV_CY82C693
        tristate "CY82C693 chipset support"
+       depends on ALPHA
        select IDE_TIMINGS
        select BLK_DEV_IDEDMA_PCI
        help
@@ -548,6 +549,7 @@ config BLK_DEV_CS5535
 
 config BLK_DEV_HPT34X
        tristate "HPT34X chipset support"
+       depends on BROKEN
        select BLK_DEV_IDEDMA_PCI
        help
          This driver adds up to 4 more EIDE devices sharing a single
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 52f58c88578337a1029265250ca8775b89bf90e4..f575e8341aec8f3e2580486e6f0432045bb73bbc 100644 (file)
@@ -72,7 +72,7 @@ struct icside_state {
        void __iomem *ioc_base;
        unsigned int sel;
        unsigned int type;
-       ide_hwif_t *hwif[2];
+       struct ide_host *host;
 };
 
 #define ICS_TYPE_A3IN  0
@@ -375,12 +375,14 @@ static int icside_dma_test_irq(ide_drive_t *drive)
 
 static void icside_dma_timeout(ide_drive_t *drive)
 {
+       ide_hwif_t *hwif = drive->hwif;
+
        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
 
        if (icside_dma_test_irq(drive))
                return;
 
-       ide_dump_status(drive, "DMA timeout", ide_read_status(drive));
+       ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
 
        icside_dma_end(drive);
 }
@@ -440,10 +442,10 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
 static int __init
 icside_register_v5(struct icside_state *state, struct expansion_card *ec)
 {
-       ide_hwif_t *hwif;
        void __iomem *base;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw;
+       struct ide_host *host;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+       int ret;
 
        base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
        if (!base)
@@ -463,22 +465,23 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
 
        icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
 
-       hwif = ide_find_port();
-       if (!hwif)
+       host = ide_host_alloc(NULL, hws);
+       if (host == NULL)
                return -ENODEV;
 
-       ide_init_port_hw(hwif, &hw);
-       default_hwif_mmiops(hwif);
-
-       state->hwif[0] = hwif;
+       state->host = host;
 
        ecard_set_drvdata(ec, state);
 
-       idx[0] = hwif->index;
-
-       ide_device_add(idx, NULL);
+       ret = ide_host_register(host, NULL, hws);
+       if (ret)
+               goto err_free;
 
        return 0;
+err_free:
+       ide_host_free(host);
+       ecard_set_drvdata(ec, NULL);
+       return ret;
 }
 
 static const struct ide_port_info icside_v6_port_info __initdata = {
@@ -493,13 +496,12 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
 static int __init
 icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 {
-       ide_hwif_t *hwif, *mate;
        void __iomem *ioc_base, *easi_base;
+       struct ide_host *host;
        unsigned int sel = 0;
        int ret;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
        struct ide_port_info d = icside_v6_port_info;
-       hw_regs_t hw[2];
 
        ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
        if (!ioc_base) {
@@ -538,28 +540,11 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
        icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
        icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
 
-       /*
-        * Find and register the interfaces.
-        */
-       hwif = ide_find_port();
-       if (hwif == NULL)
+       host = ide_host_alloc(&d, hws);
+       if (host == NULL)
                return -ENODEV;
 
-       ide_init_port_hw(hwif, &hw[0]);
-       default_hwif_mmiops(hwif);
-
-       idx[0] = hwif->index;
-
-       mate = ide_find_port();
-       if (mate) {
-               ide_init_port_hw(mate, &hw[1]);
-               default_hwif_mmiops(mate);
-
-               idx[1] = mate->index;
-       }
-
-       state->hwif[0]    = hwif;
-       state->hwif[1]    = mate;
+       state->host = host;
 
        ecard_set_drvdata(ec, state);
 
@@ -569,11 +554,17 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
                d.dma_ops = NULL;
        }
 
-       ide_device_add(idx, &d);
+       ret = ide_host_register(host, NULL, hws);
+       if (ret)
+               goto err_free;
 
        return 0;
-
- out:
+err_free:
+       ide_host_free(host);
+       if (d.dma_ops)
+               free_dma(ec->dma);
+       ecard_set_drvdata(ec, NULL);
+out:
        return ret;
 }
 
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 2f311da4c963bf5b26ef2dc391f1555de2690076..176532ffae0efcc5b2b28fffef70de95da378475 100644 (file)
 
 static int __init ide_arm_init(void)
 {
-       ide_hwif_t *hwif;
-       hw_regs_t hw;
        unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        if (!request_region(base, 8, DRV_NAME)) {
                printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -51,15 +49,7 @@ static int __init ide_arm_init(void)
        hw.irq = IDE_ARM_IRQ;
        hw.chipset = ide_generic;
 
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw);
-               idx[0] = hwif->index;
-
-               ide_device_add(idx, NULL);
-       }
-
-       return 0;
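+       /* ide_host_add() wraps the old find-port/init-hw/register sequence */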
+       return ide_host_add(NULL, hws, NULL);
 }
 
 module_init(ide_arm_init);
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c79b85b6e4a34e82b135cee906842656bc3652a9..65bb4b8fd57086946e02c2c09bf907fa9909980b 100644 (file)
@@ -316,15 +316,14 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
 static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
                                          const struct ide_port_info *d)
 {
-       unsigned long base =
-               hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
-
        printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
 
        if (ide_allocate_dma_engine(hwif))
                return -1;
 
-       ide_setup_dma(hwif, base);
+       hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
+
+       hwif->dma_ops = &sff_dma_ops;
 
        return 0;
 }
@@ -348,11 +347,10 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
 {
        struct clk *clk;
        struct resource *mem, *irq;
-       ide_hwif_t *hwif;
+       struct ide_host *host;
        unsigned long base, rate;
-       int i;
-       hw_regs_t hw;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       int i, rc;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        clk = clk_get(NULL, "IDECLK");
        if (IS_ERR(clk))
@@ -394,24 +392,14 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
        hw.irq = irq->start;
        hw.chipset = ide_palm3710;
 
-       hwif = ide_find_port();
-       if (hwif == NULL)
+       rc = ide_host_add(&palm_bk3710_port_info, hws, NULL);
+       if (rc)
                goto out;
 
-       i = hwif->index;
-
-       ide_init_port_hw(hwif, &hw);
-
-       default_hwif_mmiops(hwif);
-
-       idx[0] = i;
-
-       ide_device_add(idx, &palm_bk3710_port_info);
-
        return 0;
 out:
        printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
-       return -ENODEV;
+       return rc;
 }
 
 /* work with hotplug and coldplug */
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 43057e0303c89016e1098f6b410d6870c8cd5bf0..2bdd8b734afb6221aed29958c9a696e4f341614b 100644 (file)
@@ -32,11 +32,10 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
 static int __devinit
 rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
 {
-       ide_hwif_t *hwif;
        void __iomem *base;
+       struct ide_host *host;
        int ret;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        ret = ecard_request_resources(ec);
        if (ret)
@@ -53,20 +52,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
        hw.chipset = ide_generic;
        hw.dev = &ec->dev;
 
-       hwif = ide_find_port();
-       if (hwif == NULL) {
-               ret = -ENOENT;
+       ret = ide_host_add(&rapide_port_info, hws, &host);
+       if (ret)
                goto release;
-       }
-
-       ide_init_port_hw(hwif, &hw);
-       default_hwif_mmiops(hwif);
-
-       idx[0] = hwif->index;
-
-       ide_device_add(idx, &rapide_port_info);
 
-       ecard_set_drvdata(ec, hwif);
+       ecard_set_drvdata(ec, host);
        goto out;
 
  release:
@@ -77,11 +67,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
 
 static void __devexit rapide_remove(struct expansion_card *ec)
 {
-       ide_hwif_t *hwif = ecard_get_drvdata(ec);
+       struct ide_host *host = ecard_get_drvdata(ec);
 
        ecard_set_drvdata(ec, NULL);
 
-       ide_unregister(hwif);
+       ide_host_remove(host);
 
        ecard_release_resources(ec);
 }
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 20fad6d542cc75178dee5cd0629ad6d890f8a88a..bde7a585f1987e3488e318982754143399a3709e 100644 (file)
@@ -100,6 +100,8 @@ static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
        /* be sure we're looking at the low order bits */
        outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
 
+       if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+               tf->feature = inb(io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_IN_NSECT)
                tf->nsect  = inb(io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -153,6 +155,21 @@ static void h8300_output_data(ide_drive_t *drive, struct request *rq,
        mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
 }
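+/* the port I/O methods are now grouped into a const ide_tp_ops table rather
+ * than patched into the hwif at init time */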
 
+static const struct ide_tp_ops h8300_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = ide_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = ide_set_irq,
+
+       .tf_load                = h8300_tf_load,
+       .tf_read                = h8300_tf_read,
+
+       .input_data             = h8300_input_data,
+       .output_data            = h8300_output_data,
+};
+
 #define H8300_IDE_GAP (2)
 
 static inline void hw_setup(hw_regs_t *hw)
@@ -167,27 +184,14 @@ static inline void hw_setup(hw_regs_t *hw)
        hw->chipset = ide_generic;
 }
 
-static inline void hwif_setup(ide_hwif_t *hwif)
-{
-       default_hwif_iops(hwif);
-
-       hwif->tf_load = h8300_tf_load;
-       hwif->tf_read = h8300_tf_read;
-
-       hwif->input_data  = h8300_input_data;
-       hwif->output_data = h8300_output_data;
-}
-
 static const struct ide_port_info h8300_port_info = {
+       .tp_ops                 = &h8300_tp_ops,
        .host_flags             = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
 };
 
 static int __init h8300_ide_init(void)
 {
-       hw_regs_t hw;
-       ide_hwif_t *hwif;
-       int index;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
 
@@ -200,19 +204,7 @@ static int __init h8300_ide_init(void)
 
        hw_setup(&hw);
 
-       hwif = ide_find_port_slot(&h8300_port_info);
-       if (hwif == NULL)
-               return -ENOENT;
-
-       index = hwif->index;
-       ide_init_port_hw(hwif, &hw);
-       hwif_setup(hwif);
-
-       idx[0] = index;
-
-       ide_device_add(idx, &h8300_port_info);
-
-       return 0;
+       return ide_host_add(&h8300_port_info, hws, NULL);
 
 out_busy:
        printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2802031de670a3bfa3f211e9c6a31a184e040948..adf04f99cdebb20f9d63e15c0010fa34b6db5e39 100644 (file)
@@ -22,6 +22,8 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
        void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int))
 {
        ide_hwif_t *hwif = drive->hwif;
+       struct request *rq = hwif->hwgroup->rq;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        xfer_func_t *xferfunc;
        unsigned int temp;
        u16 bcount;
@@ -30,12 +32,12 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
        debug_log("Enter %s - interrupt handler\n", __func__);
 
        if (pc->flags & PC_FLAG_TIMEDOUT) {
-               pc->callback(drive);
+               drive->pc_callback(drive);
                return ide_stopped;
        }
 
        /* Clear the interrupt */
-       stat = ide_read_status(drive);
+       stat = tp_ops->read_status(hwif);
 
        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
                if (hwif->dma_ops->dma_end(drive) ||
@@ -63,8 +65,9 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
                local_irq_enable_in_hardirq();
 
                if (drive->media == ide_tape && !scsi &&
-                   (stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
+                   (stat & ERR_STAT) && rq->cmd[0] == REQUEST_SENSE)
                        stat &= ~ERR_STAT;
+
                if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
                        /* Error detected */
                        debug_log("%s: I/O error\n", drive->name);
@@ -75,16 +78,17 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                        goto cmd_finished;
                        }
 
-                       if (pc->c[0] == REQUEST_SENSE) {
+                       if (rq->cmd[0] == REQUEST_SENSE) {
                                printk(KERN_ERR "%s: I/O error in request sense"
                                                " command\n", drive->name);
                                return ide_do_reset(drive);
                        }
 
-                       debug_log("[cmd %x]: check condition\n", pc->c[0]);
+                       debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
 
                        /* Retry operation */
                        retry_pc(drive);
+
                        /* queued, but not started */
                        return ide_stopped;
                }
@@ -95,8 +99,10 @@ cmd_finished:
                        dsc_handle(drive);
                        return ide_stopped;
                }
+
                /* Command finished - Call the callback function */
-               pc->callback(drive);
+               drive->pc_callback(drive);
+
                return ide_stopped;
        }
 
@@ -107,16 +113,15 @@ cmd_finished:
                ide_dma_off(drive);
                return ide_do_reset(drive);
        }
-       /* Get the number of bytes to transfer on this interrupt. */
-       bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
-                 hwif->INB(hwif->io_ports.lbam_addr);
 
-       ireason = hwif->INB(hwif->io_ports.nsect_addr);
+       /* Get the number of bytes to transfer on this interrupt. */
+       ide_read_bcount_and_ireason(drive, &bcount, &ireason);
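+       /* per ATAPI, bcount lives in the LBA mid/high (cylinder) registers
+        * and ireason in the low bits of the sector count register */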
 
        if (ireason & CD) {
                printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__);
                return ide_do_reset(drive);
        }
+
        if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
                /* Hopefully, we will never get here */
                printk(KERN_ERR "%s: We wanted to %s, but the device wants us "
@@ -125,6 +130,7 @@ cmd_finished:
                                (ireason & IO) ? "Read" : "Write");
                return ide_do_reset(drive);
        }
+
        if (!(pc->flags & PC_FLAG_WRITING)) {
                /* Reading - Check that we have enough space */
                temp = pc->xferred + bcount;
@@ -142,7 +148,7 @@ cmd_finished:
                                        if (pc->sg)
                                                io_buffers(drive, pc, temp, 0);
                                        else
-                                               hwif->input_data(drive, NULL,
+                                               tp_ops->input_data(drive, NULL,
                                                        pc->cur_pos, temp);
                                        printk(KERN_ERR "%s: transferred %d of "
                                                        "%d bytes\n",
@@ -159,9 +165,9 @@ cmd_finished:
                        debug_log("The device wants to send us more data than "
                                  "expected - allowing transfer\n");
                }
-               xferfunc = hwif->input_data;
+               xferfunc = tp_ops->input_data;
        } else
-               xferfunc = hwif->output_data;
+               xferfunc = tp_ops->output_data;
 
        if ((drive->media == ide_floppy && !scsi && !pc->buf) ||
            (drive->media == ide_tape && !scsi && pc->bh) ||
@@ -175,7 +181,7 @@ cmd_finished:
        pc->cur_pos += bcount;
 
        debug_log("[cmd %x] transferred %d bytes on that intr.\n",
-                 pc->c[0], bcount);
+                 rq->cmd[0], bcount);
 
        /* And set the interrupt handler again */
        ide_set_handler(drive, handler, timeout, expiry);
@@ -183,16 +189,27 @@ cmd_finished:
 }
 EXPORT_SYMBOL_GPL(ide_pc_intr);
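+
+/* the interrupt reason is the low two bits (CoD, IO) of the sector count
+ * register; reading it via tp_ops->tf_read keeps mmio-only hosts working */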
 
+static u8 ide_read_ireason(ide_drive_t *drive)
+{
+       ide_task_t task;
+
+       memset(&task, 0, sizeof(task));
+       task.tf_flags = IDE_TFLAG_IN_NSECT;
+
+       drive->hwif->tp_ops->tf_read(drive, &task);
+
+       return task.tf.nsect & 3;
+}
+
 static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
 {
-       ide_hwif_t *hwif = drive->hwif;
        int retries = 100;
 
        while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
                printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
                                "a packet command, retrying\n", drive->name);
                udelay(100);
-               ireason = hwif->INB(hwif->io_ports.nsect_addr);
+               ireason = ide_read_ireason(drive);
                if (retries == 0) {
                        printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
                                        "a packet command, ignoring\n",
@@ -210,6 +227,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                ide_expiry_t *expiry)
 {
        ide_hwif_t *hwif = drive->hwif;
+       struct request *rq = hwif->hwgroup->rq;
        ide_startstop_t startstop;
        u8 ireason;
 
@@ -219,7 +237,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
                return startstop;
        }
 
-       ireason = hwif->INB(hwif->io_ports.nsect_addr);
+       ireason = ide_read_ireason(drive);
        if (drive->media == ide_tape && !drive->scsi)
                ireason = ide_wait_ireason(drive, ireason);
 
@@ -239,8 +257,8 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
        }
 
        /* Send the actual packet */
-       if ((pc->flags & PC_FLAG_ZIP_DRIVE) == 0)
-               hwif->output_data(drive, NULL, pc->c, 12);
+       if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
+               hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12);
 
        return ide_started;
 }
@@ -284,7 +302,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
                           bcount, dma);
 
        /* Issue the packet command */
-       if (pc->flags & PC_FLAG_DRQ_INTERRUPT) {
+       if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
                ide_execute_command(drive, WIN_PACKETCMD, handler,
                                    timeout, NULL);
                return ide_started;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e29dd5320901c65cfe9f9b985cf28c8997832ac..4e73aeee40534f553c65075c6b1b4730c2253123 100644 (file)
@@ -85,10 +85,8 @@ static void ide_cd_put(struct cdrom_info *cd)
 /* Mark that we've seen a media change and invalidate our internal buffers. */
 static void cdrom_saw_media_change(ide_drive_t *drive)
 {
-       struct cdrom_info *cd = drive->driver_data;
-
-       cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
-       cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
+       drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
+       drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
 }
 
 static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -280,11 +278,12 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
  */
 static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 {
-       struct request *rq = HWGROUP(drive)->rq;
+       ide_hwif_t *hwif = drive->hwif;
+       struct request *rq = hwif->hwgroup->rq;
        int stat, err, sense_key;
 
        /* check for errors */
-       stat = ide_read_status(drive);
+       stat = hwif->tp_ops->read_status(hwif);
 
        if (stat_ret)
                *stat_ret = stat;
@@ -528,7 +527,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
        ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL,
                           xferlen, info->dma);
 
-       if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
+       if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
                /* waiting for CDB interrupt, not DMA yet. */
                if (info->dma)
                        drive->waiting_for_dma = 0;
@@ -560,7 +559,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
        struct cdrom_info *info = drive->driver_data;
        ide_startstop_t startstop;
 
-       if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
+       if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
                /*
                 * Here we should have been called after receiving an interrupt
                 * from the device.  DRQ should now be set.
@@ -589,7 +588,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
                cmd_len = ATAPI_MIN_CDB_BYTES;
 
        /* send the command to the device */
-       hwif->output_data(drive, NULL, rq->cmd, cmd_len);
+       hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
 
        /* start the DMA if need be */
        if (info->dma)
@@ -606,6 +605,8 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
 static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
                                int len, int ireason, int rw)
 {
+       ide_hwif_t *hwif = drive->hwif;
+
        /*
         * ireason == 0: the drive wants to receive data from us
         * ireason == 2: the drive is expecting to transfer data to us
@@ -624,7 +625,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
                 * Some drives (ASUS) seem to tell us that status info is
                 * available.  Just get it and ignore.
                 */
-               (void)ide_read_status(drive);
+               (void)hwif->tp_ops->read_status(hwif);
                return 0;
        } else {
                /* drive wants a command packet, or invalid ireason... */
@@ -645,20 +646,18 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
  */
 static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
 {
-       struct cdrom_info *cd = drive->driver_data;
-
        if ((len % SECTOR_SIZE) == 0)
                return 0;
 
        printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
                        drive->name, __func__, len);
 
-       if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES)
+       if (drive->atapi_flags & IDE_AFLAG_LIMIT_NFRAMES)
                printk(KERN_ERR "  This drive is not supported by "
                                "this version of the driver\n");
        else {
                printk(KERN_ERR "  Trying to limit transfer sizes\n");
-               cd->cd_flags |= IDE_CD_FLAG_LIMIT_NFRAMES;
+               drive->atapi_flags |= IDE_AFLAG_LIMIT_NFRAMES;
        }
 
        return 1;
@@ -735,7 +734,7 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
        if (cdrom_decode_status(drive, 0, &stat))
                return ide_stopped;
 
-       info->cd_flags |= IDE_CD_FLAG_SEEKING;
+       drive->atapi_flags |= IDE_AFLAG_SEEKING;
 
        if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
                if (--retry == 0)
@@ -892,10 +891,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        struct request *rq = HWGROUP(drive)->rq;
        xfer_func_t *xferfunc;
        ide_expiry_t *expiry = NULL;
-       int dma_error = 0, dma, stat, ireason, len, thislen, uptodate = 0;
+       int dma_error = 0, dma, stat, thislen, uptodate = 0;
        int write = (rq_data_dir(rq) == WRITE) ? 1 : 0;
        unsigned int timeout;
-       u8 lowcyl, highcyl;
+       u16 len;
+       u8 ireason;
 
        /* check for errors */
        dma = info->dma;
@@ -923,12 +923,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                goto end_request;
        }
 
-       /* ok we fall to pio :/ */
-       ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
-       lowcyl  = hwif->INB(hwif->io_ports.lbam_addr);
-       highcyl = hwif->INB(hwif->io_ports.lbah_addr);
-
-       len = lowcyl + (256 * highcyl);
+       ide_read_bcount_and_ireason(drive, &len, &ireason);
 
        thislen = blk_fs_request(rq) ? len : rq->data_len;
        if (thislen > len)
@@ -991,10 +986,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 
        if (ireason == 0) {
                write = 1;
-               xferfunc = hwif->output_data;
+               xferfunc = hwif->tp_ops->output_data;
        } else {
                write = 0;
-               xferfunc = hwif->input_data;
+               xferfunc = hwif->tp_ops->input_data;
        }
 
        /* transfer data */
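
The two hunks above are a matched pair: the ATAPI byte count and interrupt reason are now fetched through ide_read_bcount_and_ireason() instead of raw hwif->INB() port reads, and the PIO transfer routine is taken from the tp_ops table. The helper's body is not part of this diff, so the following is only a sketch of the arithmetic it is expected to perform, mirroring the removed lines:

    /* Assumed shape: the byte count lives in the LBA mid/high taskfile
     * registers, the interrupt reason in the sector-count register. */
    #include <stdio.h>

    struct taskfile { unsigned char nsect, lbam, lbah; };

    static void read_bcount_and_ireason(const struct taskfile *tf,
                                        unsigned short *bcount,
                                        unsigned char *ireason)
    {
        *bcount  = (tf->lbah << 8) | tf->lbam; /* was lowcyl + 256 * highcyl */
        *ireason = tf->nsect & 3;              /* was INB(nsect_addr) & 0x3  */
    }

    int main(void)
    {
        struct taskfile tf = { .nsect = 2, .lbam = 0x00, .lbah = 0x08 };
        unsigned short len;
        unsigned char ireason;

        read_bcount_and_ireason(&tf, &len, &ireason);
        printf("len %u, ireason %u\n", len, ireason); /* len 2048, ireason 2 */
        return 0;
    }
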
@@ -1198,9 +1193,10 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
        int xferlen;
 
        if (blk_fs_request(rq)) {
-               if (info->cd_flags & IDE_CD_FLAG_SEEKING) {
+               if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
+                       ide_hwif_t *hwif = drive->hwif;
                        unsigned long elapsed = jiffies - info->start_seek;
-                       int stat = ide_read_status(drive);
+                       int stat = hwif->tp_ops->read_status(hwif);
 
                        if ((stat & SEEK_STAT) != SEEK_STAT) {
                                if (elapsed < IDECD_SEEK_TIMEOUT) {
@@ -1211,7 +1207,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
                                printk(KERN_ERR "%s: DSC timeout\n",
                                                drive->name);
                        }
-                       info->cd_flags &= ~IDE_CD_FLAG_SEEKING;
+                       drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
                }
                if (rq_data_dir(rq) == READ &&
                    IDE_LARGE_SEEK(info->last_block, block,
@@ -1288,7 +1284,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
         */
        cmd[7] = cdi->sanyo_slot % 3;
 
-       return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, REQ_QUIET);
+       return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
 }
 
 static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -1296,8 +1292,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
                               struct request_sense *sense)
 {
        struct {
-               __u32 lba;
-               __u32 blocklen;
+               __be32 lba;
+               __be32 blocklen;
        } capbuf;
 
        int stat;
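
The __u32 to __be32 change is an annotation fix rather than a behavioural one: READ CD-ROM CAPACITY returns both fields big-endian on the wire, and typing them __be32 lets sparse verify that every access goes through be32_to_cpu(). The same idea in plain user-space C, with ntohl() standing in for be32_to_cpu() and made-up sample values:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    struct capacity_buf {
        uint32_t lba;       /* big-endian on the wire */
        uint32_t blocklen;  /* big-endian on the wire */
    };

    int main(void)
    {
        /* build a buffer in wire (big-endian) order, as the drive would */
        struct capacity_buf capbuf = { htonl(359844), htonl(2048) };

        printf("last LBA %u, block length %u\n",
               (unsigned)ntohl(capbuf.lba), (unsigned)ntohl(capbuf.blocklen));
        return 0;
    }
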
@@ -1369,7 +1365,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
         */
        (void) cdrom_check_status(drive, sense);
 
-       if (info->cd_flags & IDE_CD_FLAG_TOC_VALID)
+       if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
                return 0;
 
        /* try to get the total cdrom capacity and sector size */
@@ -1391,7 +1387,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
        if (stat)
                return stat;
 
-       if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
+       if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
                toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
                toc->hdr.last_track  = BCD2BIN(toc->hdr.last_track);
        }
@@ -1432,7 +1428,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
                if (stat)
                        return stat;
 
-               if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
+               if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
                        toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT);
                        toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT);
                } else {
@@ -1446,14 +1442,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
 
        toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
 
-       if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
+       if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
                toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
                toc->hdr.last_track  = BCD2BIN(toc->hdr.last_track);
        }
 
        for (i = 0; i <= ntracks; i++) {
-               if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
-                       if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD)
+               if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
+                       if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
                                toc->ent[i].track = BCD2BIN(toc->ent[i].track);
                        msf_from_bcd(&toc->ent[i].addr.msf);
                }
@@ -1476,7 +1472,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
                toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
        }
 
-       if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
+       if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
                /* re-read multisession information using MSF format */
                stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
                                           sizeof(ms_tmp), sense);
@@ -1500,7 +1496,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
        }
 
        /* Remember that we've read this stuff. */
-       info->cd_flags |= IDE_CD_FLAG_TOC_VALID;
+       drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
 
        return 0;
 }
@@ -1512,7 +1508,7 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
        struct packet_command cgc;
        int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
 
-       if ((info->cd_flags & IDE_CD_FLAG_FULL_CAPS_PAGE) == 0)
+       if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
                size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
 
        init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
@@ -1530,15 +1526,12 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
        struct cdrom_info *cd = drive->driver_data;
        u16 curspeed, maxspeed;
 
-       curspeed = *(u16 *)&buf[8 + 14];
-       maxspeed = *(u16 *)&buf[8 +  8];
-
-       if (cd->cd_flags & IDE_CD_FLAG_LE_SPEED_FIELDS) {
-               curspeed = le16_to_cpu(curspeed);
-               maxspeed = le16_to_cpu(maxspeed);
+       if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
+               curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
+               maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
        } else {
-               curspeed = be16_to_cpu(curspeed);
-               maxspeed = be16_to_cpu(maxspeed);
+               curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
+               maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
        }
 
        cd->current_speed = (curspeed + (176/2)) / 176;
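
Two things happen in this hunk. First, the 16-bit speed words are read straight out of the mode-sense buffer with le16_to_cpup()/be16_to_cpup(), removing the intermediate local copies; most drives store these fields big-endian, and the "241N" quirk further down flags the byte-swapped exception. Second, the surviving context line converts kB/s into the familiar "Nx" rating: 1x CD-ROM is roughly 176 kB/s, and adding half the divisor rounds to nearest instead of truncating. The conversion, isolated:

    #include <stdio.h>

    /* kB/s -> "Nx" CD speed, rounded to nearest (1x is ~176 kB/s) */
    static unsigned cd_speed_x(unsigned kbps)
    {
        return (kbps + 176 / 2) / 176;
    }

    int main(void)
    {
        printf("%ux\n", cd_speed_x(8467)); /* 48x */
        printf("%ux\n", cd_speed_x(176));  /* 1x  */
        return 0;
    }
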
@@ -1579,7 +1572,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
        devinfo->handle = drive;
        strcpy(devinfo->name, drive->name);
 
-       if (info->cd_flags & IDE_CD_FLAG_NO_SPEED_SELECT)
+       if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
                devinfo->mask |= CDC_SELECT_SPEED;
 
        devinfo->disk = info->disk;
@@ -1605,8 +1598,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
                return nslots;
        }
 
-       if (cd->cd_flags & IDE_CD_FLAG_PRE_ATAPI12) {
-               cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT;
+       if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
+               drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
                cdi->mask &= ~CDC_PLAY_AUDIO;
                return nslots;
        }
@@ -1624,9 +1617,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
                return 0;
 
        if ((buf[8 + 6] & 0x01) == 0)
-               cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK;
+               drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
        if (buf[8 + 6] & 0x08)
-               cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT;
+               drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
        if (buf[8 + 3] & 0x01)
                cdi->mask &= ~CDC_CD_R;
        if (buf[8 + 3] & 0x02)
@@ -1637,7 +1630,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
                cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
        if (buf[8 + 3] & 0x10)
                cdi->mask &= ~CDC_DVD_R;
-       if ((buf[8 + 4] & 0x01) || (cd->cd_flags & IDE_CD_FLAG_PLAY_AUDIO_OK))
+       if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
                cdi->mask &= ~CDC_PLAY_AUDIO;
 
        mechtype = buf[8 + 6] >> 5;
@@ -1679,7 +1672,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
        else
                printk(KERN_CONT " drive");
 
-       printk(KERN_CONT ", %dkB Cache\n", be16_to_cpu(*(u16 *)&buf[8 + 12]));
+       printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12]));
 
        return nslots;
 }
@@ -1802,43 +1795,43 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
 
 static const struct cd_list_entry ide_cd_quirks_list[] = {
        /* Limit transfer size per interrupt. */
-       { "SAMSUNG CD-ROM SCR-2430", NULL,   IDE_CD_FLAG_LIMIT_NFRAMES      },
-       { "SAMSUNG CD-ROM SCR-2432", NULL,   IDE_CD_FLAG_LIMIT_NFRAMES      },
+       { "SAMSUNG CD-ROM SCR-2430", NULL,   IDE_AFLAG_LIMIT_NFRAMES         },
+       { "SAMSUNG CD-ROM SCR-2432", NULL,   IDE_AFLAG_LIMIT_NFRAMES         },
        /* SCR-3231 doesn't support the SET_CD_SPEED command. */
-       { "SAMSUNG CD-ROM SCR-3231", NULL,   IDE_CD_FLAG_NO_SPEED_SELECT    },
+       { "SAMSUNG CD-ROM SCR-3231", NULL,   IDE_AFLAG_NO_SPEED_SELECT       },
        /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
-       { "NEC CD-ROM DRIVE:260",    "1.01", IDE_CD_FLAG_TOCADDR_AS_BCD |
-                                            IDE_CD_FLAG_PRE_ATAPI12,       },
+       { "NEC CD-ROM DRIVE:260",    "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
+                                            IDE_AFLAG_PRE_ATAPI12,          },
        /* Vertos 300, some versions of this drive like to talk BCD. */
-       { "V003S0DS",                NULL,   IDE_CD_FLAG_VERTOS_300_SSD,    },
+       { "V003S0DS",                NULL,   IDE_AFLAG_VERTOS_300_SSD,       },
        /* Vertos 600 ESD. */
-       { "V006E0DS",                NULL,   IDE_CD_FLAG_VERTOS_600_ESD,    },
+       { "V006E0DS",                NULL,   IDE_AFLAG_VERTOS_600_ESD,       },
        /*
         * Sanyo 3 CD changer uses a non-standard command for CD changing
         * (by default standard ATAPI support for CD changers is used).
         */
-       { "CD-ROM CDR-C3 G",         NULL,   IDE_CD_FLAG_SANYO_3CD          },
-       { "CD-ROM CDR-C3G",          NULL,   IDE_CD_FLAG_SANYO_3CD          },
-       { "CD-ROM CDR_C36",          NULL,   IDE_CD_FLAG_SANYO_3CD          },
+       { "CD-ROM CDR-C3 G",         NULL,   IDE_AFLAG_SANYO_3CD             },
+       { "CD-ROM CDR-C3G",          NULL,   IDE_AFLAG_SANYO_3CD             },
+       { "CD-ROM CDR_C36",          NULL,   IDE_AFLAG_SANYO_3CD             },
        /* Stingray 8X CD-ROM. */
-       { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_CD_FLAG_PRE_ATAPI12},
+       { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
        /*
         * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length
         * mode sense page capabilities size, but older drives break.
         */
-       { "ATAPI CD ROM DRIVE 50X MAX", NULL,   IDE_CD_FLAG_FULL_CAPS_PAGE  },
-       { "WPI CDS-32X",                NULL,   IDE_CD_FLAG_FULL_CAPS_PAGE  },
+       { "ATAPI CD ROM DRIVE 50X MAX", NULL,   IDE_AFLAG_FULL_CAPS_PAGE     },
+       { "WPI CDS-32X",                NULL,   IDE_AFLAG_FULL_CAPS_PAGE     },
        /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
-       { "",                        "241N", IDE_CD_FLAG_LE_SPEED_FIELDS    },
+       { "",                        "241N", IDE_AFLAG_LE_SPEED_FIELDS       },
        /*
         * Some drives used by Apple don't advertise audio play
          * but they do support reading TOC & audio data.

         */
-       { "MATSHITADVD-ROM SR-8187", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
-       { "MATSHITADVD-ROM SR-8186", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
-       { "MATSHITADVD-ROM SR-8176", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
-       { "MATSHITADVD-ROM SR-8174", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
-       { "Optiarc DVD RW AD-5200A", NULL,   IDE_CD_FLAG_PLAY_AUDIO_OK      },
+       { "MATSHITADVD-ROM SR-8187", NULL,   IDE_AFLAG_PLAY_AUDIO_OK         },
+       { "MATSHITADVD-ROM SR-8186", NULL,   IDE_AFLAG_PLAY_AUDIO_OK         },
+       { "MATSHITADVD-ROM SR-8176", NULL,   IDE_AFLAG_PLAY_AUDIO_OK         },
+       { "MATSHITADVD-ROM SR-8174", NULL,   IDE_AFLAG_PLAY_AUDIO_OK         },
+       { "Optiarc DVD RW AD-5200A", NULL,   IDE_AFLAG_PLAY_AUDIO_OK         },
        { NULL, NULL, 0 }
 };
 
@@ -1873,20 +1866,20 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 
        drive->special.all      = 0;
 
-       cd->cd_flags = IDE_CD_FLAG_MEDIA_CHANGED | IDE_CD_FLAG_NO_EJECT |
+       drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT |
                       ide_cd_flags(id);
 
        if ((id->config & 0x0060) == 0x20)
-               cd->cd_flags |= IDE_CD_FLAG_DRQ_INTERRUPT;
+               drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
 
-       if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_300_SSD) &&
+       if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
            id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
-               cd->cd_flags |= (IDE_CD_FLAG_TOCTRACKS_AS_BCD |
-                                IDE_CD_FLAG_TOCADDR_AS_BCD);
-       else if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_600_ESD) &&
+               drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
+                                    IDE_AFLAG_TOCADDR_AS_BCD);
+       else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
                 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
-               cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD;
-       else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD)
+               drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
+       else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
                /* 3 => use CD in slot 0 */
                cdi->sanyo_slot = 3;
 
index fe0ea36e4124d1c1ba1cc438d9c8538918122664..61a4599b77dbf3c37528799118c181960a5f5786 100644 (file)
 #define ATAPI_CAPABILITIES_PAGE_SIZE           (8 + 20)
 #define ATAPI_CAPABILITIES_PAGE_PAD_SIZE       4
 
-enum {
-       /* Device sends an interrupt when ready for a packet command. */
-       IDE_CD_FLAG_DRQ_INTERRUPT       = (1 << 0),
-       /* Drive cannot lock the door. */
-       IDE_CD_FLAG_NO_DOORLOCK         = (1 << 1),
-       /* Drive cannot eject the disc. */
-       IDE_CD_FLAG_NO_EJECT            = (1 << 2),
-       /* Drive is a pre ATAPI 1.2 drive. */
-       IDE_CD_FLAG_PRE_ATAPI12         = (1 << 3),
-       /* TOC addresses are in BCD. */
-       IDE_CD_FLAG_TOCADDR_AS_BCD      = (1 << 4),
-       /* TOC track numbers are in BCD. */
-       IDE_CD_FLAG_TOCTRACKS_AS_BCD    = (1 << 5),
-       /*
-        * Drive does not provide data in multiples of SECTOR_SIZE
-        * when more than one interrupt is needed.
-        */
-       IDE_CD_FLAG_LIMIT_NFRAMES       = (1 << 6),
-       /* Seeking in progress. */
-       IDE_CD_FLAG_SEEKING             = (1 << 7),
-       /* Driver has noticed a media change. */
-       IDE_CD_FLAG_MEDIA_CHANGED       = (1 << 8),
-       /* Saved TOC information is current. */
-       IDE_CD_FLAG_TOC_VALID           = (1 << 9),
-       /* We think that the drive door is locked. */
-       IDE_CD_FLAG_DOOR_LOCKED         = (1 << 10),
-       /* SET_CD_SPEED command is unsupported. */
-       IDE_CD_FLAG_NO_SPEED_SELECT     = (1 << 11),
-       IDE_CD_FLAG_VERTOS_300_SSD      = (1 << 12),
-       IDE_CD_FLAG_VERTOS_600_ESD      = (1 << 13),
-       IDE_CD_FLAG_SANYO_3CD           = (1 << 14),
-       IDE_CD_FLAG_FULL_CAPS_PAGE      = (1 << 15),
-       IDE_CD_FLAG_PLAY_AUDIO_OK       = (1 << 16),
-       IDE_CD_FLAG_LE_SPEED_FIELDS     = (1 << 17),
-};
-
 /* Structure of a MSF cdrom address. */
 struct atapi_msf {
        byte reserved;
@@ -128,8 +92,6 @@ struct cdrom_info {
        unsigned long last_block;
        unsigned long start_seek;
 
-       unsigned int cd_flags;
-
        u8 max_speed;           /* Max speed of the drive. */
        u8 current_speed;       /* Current speed of the drive. */
 
index 24d002addf73d5c5b6b1e9af1dcad6ceed15dd7b..74231b41f611b5c63a2e60198ab9b742405deb33 100644 (file)
@@ -27,10 +27,9 @@ int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
 void ide_cdrom_release_real(struct cdrom_device_info *cdi)
 {
        ide_drive_t *drive = cdi->handle;
-       struct cdrom_info *cd = drive->driver_data;
 
        if (!cdi->use_count)
-               cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
+               drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
 }
 
 /*
@@ -83,13 +82,12 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
                                       int slot_nr)
 {
        ide_drive_t *drive = cdi->handle;
-       struct cdrom_info *cd = drive->driver_data;
        int retval;
 
        if (slot_nr == CDSL_CURRENT) {
                (void) cdrom_check_status(drive, NULL);
-               retval = (cd->cd_flags & IDE_CD_FLAG_MEDIA_CHANGED) ? 1 : 0;
-               cd->cd_flags &= ~IDE_CD_FLAG_MEDIA_CHANGED;
+               retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0;
+               drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
                return retval;
        } else {
                return -EINVAL;
@@ -107,11 +105,11 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
        char loej = 0x02;
        unsigned char cmd[BLK_MAX_CDB];
 
-       if ((cd->cd_flags & IDE_CD_FLAG_NO_EJECT) && !ejectflag)
+       if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
                return -EDRIVE_CANT_DO_THIS;
 
        /* reload fails on some drives, if the tray is locked */
-       if ((cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) && ejectflag)
+       if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
                return 0;
 
        /* only tell drive to close tray if open, if it can do that */
@@ -123,7 +121,7 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
        cmd[0] = GPCMD_START_STOP_UNIT;
        cmd[4] = loej | (ejectflag != 0);
 
-       return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, 0);
+       return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
 }
 
 /* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
@@ -131,7 +129,6 @@ static
 int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
                    struct request_sense *sense)
 {
-       struct cdrom_info *cd = drive->driver_data;
        struct request_sense my_sense;
        int stat;
 
@@ -139,7 +136,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
                sense = &my_sense;
 
        /* If the drive cannot lock the door, just pretend. */
-       if (cd->cd_flags & IDE_CD_FLAG_NO_DOORLOCK) {
+       if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) {
                stat = 0;
        } else {
                unsigned char cmd[BLK_MAX_CDB];
@@ -149,7 +146,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
                cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
                cmd[4] = lockflag ? 1 : 0;
 
-               stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0,
+               stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
                                       sense, 0, 0);
        }
 
@@ -160,7 +157,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
            (sense->asc == 0x24 || sense->asc == 0x20)) {
                printk(KERN_ERR "%s: door locking not supported\n",
                        drive->name);
-               cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK;
+               drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
                stat = 0;
        }
 
@@ -170,9 +167,9 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
 
        if (stat == 0) {
                if (lockflag)
-                       cd->cd_flags |= IDE_CD_FLAG_DOOR_LOCKED;
+                       drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
                else
-                       cd->cd_flags &= ~IDE_CD_FLAG_DOOR_LOCKED;
+                       drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
        }
 
        return stat;
@@ -231,7 +228,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
                cmd[5] = speed & 0xff;
        }
 
-       stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0);
+       stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
 
        if (!ide_cdrom_get_capabilities(drive, buf)) {
                ide_cdrom_update_speed(drive, buf);
@@ -250,7 +247,7 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
        struct request_sense sense;
        int ret;
 
-       if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0 || !info->toc) {
+       if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
                ret = ide_cd_read_toc(drive, &sense);
                if (ret)
                        return ret;
@@ -308,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
         * A reset will unlock the door. If it was previously locked,
         * lock it again.
         */
-       if (cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED)
+       if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
                (void)ide_cd_lockdoor(drive, 1, &sense);
 
        return ret;
@@ -324,7 +321,7 @@ static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
        /*
         * don't serve cached data, if the toc isn't valid
         */
-       if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0)
+       if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
                return -EINVAL;
 
        /* Check validity of requested track number. */
@@ -374,7 +371,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
        lba_to_msf(lba_start,   &cmd[3], &cmd[4], &cmd[5]);
        lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
 
-       return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0);
+       return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
 }
 
 static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
index 3a2e80237c10f5d6daa1a0f377769428af2ba17d..df5fe5756871ca31b135a736dfc64f9d1097bec6 100644 (file)
@@ -158,7 +158,7 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
        write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
 
        if (dma)
-               index = drive->vdma ? 4 : 8;
+               index = 8;
        else
                index = drive->mult_count ? 0 : 4;
 
index 7ee44f86bc5475065a962f8b2380cd0b86158d1b..be99d463dcc7dfa0265498280e103715ee61bf61 100644 (file)
@@ -100,10 +100,11 @@ static const struct drive_list_entry drive_blacklist [] = {
  
 ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 {
+       ide_hwif_t *hwif = drive->hwif;
        u8 stat = 0, dma_stat = 0;
 
-       dma_stat = drive->hwif->dma_ops->dma_end(drive);
-       stat = ide_read_status(drive);
+       dma_stat = hwif->dma_ops->dma_end(drive);
+       stat = hwif->tp_ops->read_status(hwif);
 
        if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
                if (!dma_stat) {
@@ -334,7 +335,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
 static int dma_timer_expiry (ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
-       u8 dma_stat             = hwif->INB(hwif->dma_status);
+       u8 dma_stat             = hwif->tp_ops->read_sff_dma_status(hwif);
 
        printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
                drive->name, dma_stat);
@@ -369,14 +370,18 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        u8 unit                 = (drive->select.b.unit & 0x01);
-       u8 dma_stat             = hwif->INB(hwif->dma_status);
+       u8 dma_stat             = hwif->tp_ops->read_sff_dma_status(hwif);
 
        if (on)
                dma_stat |= (1 << (5 + unit));
        else
                dma_stat &= ~(1 << (5 + unit));
 
-       hwif->OUTB(dma_stat, hwif->dma_status);
+       if (hwif->host_flags & IDE_HFLAG_MMIO)
+               writeb(dma_stat,
+                      (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+       else
+               outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
 }
 
 EXPORT_SYMBOL_GPL(ide_dma_host_set);
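
Here and in the following ide-dma.c hunks the per-hwif OUTB/INB hooks give way to an explicit branch on IDE_HFLAG_MMIO: memory-mapped controllers use writeb()/readb() on an __iomem pointer, port-mapped ones use outb()/inb(). The value written above sets or clears bit (5 + unit), the per-drive DMA-capable latch in the BM-DMA status register. A user-space model of the dispatch, with printf() standing in for the two accessor families and an illustrative flag value:

    #include <stdint.h>
    #include <stdio.h>

    #define IDE_HFLAG_MMIO (1u << 0)    /* illustrative bit position */
    #define ATA_DMA_STATUS 2            /* offset from dma_base      */

    struct toy_hwif { unsigned host_flags; unsigned long dma_base; };

    static void dma_status_write(const struct toy_hwif *hwif, uint8_t val)
    {
        if (hwif->host_flags & IDE_HFLAG_MMIO)
            printf("writeb(0x%02x, %#lx)\n", val,
                   hwif->dma_base + ATA_DMA_STATUS);
        else
            printf("outb(0x%02x, %#lx)\n", val,
                   hwif->dma_base + ATA_DMA_STATUS);
    }

    int main(void)
    {
        struct toy_hwif pio = { 0, 0xf000 }, mmio = { IDE_HFLAG_MMIO, 0xf000 };
        unsigned unit = 1;

        dma_status_write(&pio,  1 << (5 + unit)); /* enable drive 1 DMA */
        dma_status_write(&mmio, 1 << (5 + unit));
        return 0;
    }
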
@@ -449,6 +454,7 @@ int ide_dma_setup(ide_drive_t *drive)
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
        unsigned int reading;
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
        u8 dma_stat;
 
        if (rq_data_dir(rq))
@@ -470,13 +476,21 @@ int ide_dma_setup(ide_drive_t *drive)
                outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
 
        /* specify r/w */
-       hwif->OUTB(reading, hwif->dma_command);
+       if (mmio)
+               writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+       else
+               outb(reading, hwif->dma_base + ATA_DMA_CMD);
 
-       /* read dma_status for INTR & ERROR flags */
-       dma_stat = hwif->INB(hwif->dma_status);
+       /* read DMA status for INTR & ERROR flags */
+       dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
 
        /* clear INTR & ERROR flags */
-       hwif->OUTB(dma_stat|6, hwif->dma_status);
+       if (mmio)
+               writeb(dma_stat | 6,
+                      (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+       else
+               outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+
        drive->waiting_for_dma = 1;
        return 0;
 }
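
The `dma_stat | 6` write-back that survives both branches is a write-one-to-clear acknowledge: in the SFF-8038i bus-master status register bit 1 (ERROR) and bit 2 (INTERRUPT) are cleared by writing 1 to them, while bit 0 (ACTIVE) is read-only. A small model of that register behaviour, simplified to bits 0-2:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t bmide_status_write(uint8_t cur, uint8_t val)
    {
        uint8_t w1c = val & 0x06;   /* bits 1-2 clear when written with 1 */

        return cur & ~w1c;          /* bit 0 is unaffected by writes */
    }

    int main(void)
    {
        uint8_t st = 0x07;                    /* INTR + ERROR + ACTIVE */

        st = bmide_status_write(st, st | 6);  /* the idiom from the hunk */
        assert(st == 0x01);                   /* only ACTIVE survives   */
        return 0;
    }
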
@@ -492,16 +506,24 @@ EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
 
 void ide_dma_start(ide_drive_t *drive)
 {
-       ide_hwif_t *hwif        = HWIF(drive);
-       u8 dma_cmd              = hwif->INB(hwif->dma_command);
+       ide_hwif_t *hwif = drive->hwif;
+       u8 dma_cmd;
 
        /* Note that this is done *after* the cmd has
         * been issued to the drive, as per the BM-IDE spec.
         * The Promise Ultra33 doesn't work correctly when
         * we do this part before issuing the drive cmd.
         */
-       /* start DMA */
-       hwif->OUTB(dma_cmd|1, hwif->dma_command);
+       if (hwif->host_flags & IDE_HFLAG_MMIO) {
+               dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+               /* start DMA */
+               writeb(dma_cmd | 1,
+                      (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+       } else {
+               dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
+               outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
+       }
+
        hwif->dma = 1;
        wmb();
 }
@@ -511,18 +533,33 @@ EXPORT_SYMBOL_GPL(ide_dma_start);
 /* returns 1 on error, 0 otherwise */
 int __ide_dma_end (ide_drive_t *drive)
 {
-       ide_hwif_t *hwif        = HWIF(drive);
+       ide_hwif_t *hwif = drive->hwif;
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
        u8 dma_stat = 0, dma_cmd = 0;
 
        drive->waiting_for_dma = 0;
-       /* get dma_command mode */
-       dma_cmd = hwif->INB(hwif->dma_command);
-       /* stop DMA */
-       hwif->OUTB(dma_cmd&~1, hwif->dma_command);
+
+       if (mmio) {
+               /* get DMA command mode */
+               dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+               /* stop DMA */
+               writeb(dma_cmd & ~1,
+                      (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+       } else {
+               dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
+               outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
+       }
+
        /* get DMA status */
-       dma_stat = hwif->INB(hwif->dma_status);
-       /* clear the INTR & ERROR bits */
-       hwif->OUTB(dma_stat|6, hwif->dma_status);
+       dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
+
+       if (mmio)
+               /* clear the INTR & ERROR bits */
+               writeb(dma_stat | 6,
+                      (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+       else
+               outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+
        /* purge DMA mappings */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
@@ -537,7 +574,7 @@ EXPORT_SYMBOL(__ide_dma_end);
 int ide_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
-       u8 dma_stat             = hwif->INB(hwif->dma_status);
+       u8 dma_stat             = hwif->tp_ops->read_sff_dma_status(hwif);
 
        /* return 1 if INTR asserted */
        if ((dma_stat & 4) == 4)
@@ -719,9 +756,8 @@ static int ide_tune_dma(ide_drive_t *drive)
 static int ide_dma_check(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
-       int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;
 
-       if (!vdma && ide_tune_dma(drive))
+       if (ide_tune_dma(drive))
                return 0;
 
        /* TODO: always do PIO fallback */
@@ -730,7 +766,7 @@ static int ide_dma_check(ide_drive_t *drive)
 
        ide_set_max_pio(drive);
 
-       return vdma ? 0 : -1;
+       return -1;
 }
 
 int ide_id_dma_bug(ide_drive_t *drive)
@@ -842,7 +878,7 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
 
-static const struct ide_dma_ops sff_dma_ops = {
+const struct ide_dma_ops sff_dma_ops = {
        .dma_host_set           = ide_dma_host_set,
        .dma_setup              = ide_dma_setup,
        .dma_exec_cmd           = ide_dma_exec_cmd,
@@ -852,18 +888,5 @@ static const struct ide_dma_ops sff_dma_ops = {
        .dma_timeout            = ide_dma_timeout,
        .dma_lost_irq           = ide_dma_lost_irq,
 };
-
-void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
-{
-       hwif->dma_base = base;
-
-       if (!hwif->dma_command)
-               hwif->dma_command       = hwif->dma_base + 0;
-       if (!hwif->dma_status)
-               hwif->dma_status        = hwif->dma_base + 2;
-
-       hwif->dma_ops = &sff_dma_ops;
-}
-
-EXPORT_SYMBOL_GPL(ide_setup_dma);
+EXPORT_SYMBOL_GPL(sff_dma_ops);
 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
index 011d72011cc45524fcad2d1098de83a0c786b59d..3d8e6dd0f41e11f031e7dad1a1be1c434430cbf0 100644 (file)
@@ -125,26 +125,10 @@ typedef struct ide_floppy_obj {
        int wp;
        /* Supports format progress report */
        int srfp;
-       /* Status/Action flags */
-       unsigned long flags;
 } idefloppy_floppy_t;
 
 #define IDEFLOPPY_TICKS_DELAY  HZ/20   /* default delay for ZIP 100 (50ms) */
 
-/* Floppy flag bits values. */
-enum {
-       /* DRQ interrupt device */
-       IDEFLOPPY_FLAG_DRQ_INTERRUPT            = (1 << 0),
-       /* Media may have changed */
-       IDEFLOPPY_FLAG_MEDIA_CHANGED            = (1 << 1),
-       /* Format in progress */
-       IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS       = (1 << 2),
-       /* Avoid commands not supported in Clik drive */
-       IDEFLOPPY_FLAG_CLIK_DRIVE               = (1 << 3),
-       /* Requires BH algorithm for packets */
-       IDEFLOPPY_FLAG_ZIP_DRIVE                = (1 << 4),
-};
-
 /* Defines for the MODE SENSE command */
 #define MODE_SENSE_CURRENT             0x00
 #define MODE_SENSE_CHANGEABLE          0x01
@@ -247,9 +231,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 
                data = bvec_kmap_irq(bvec, &flags);
                if (direction)
-                       hwif->output_data(drive, NULL, data, count);
+                       hwif->tp_ops->output_data(drive, NULL, data, count);
                else
-                       hwif->input_data(drive, NULL, data, count);
+                       hwif->tp_ops->input_data(drive, NULL, data, count);
                bvec_kunmap_irq(data, &flags);
 
                bcount -= count;
@@ -291,6 +275,7 @@ static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd_flags |= REQ_PREEMPT;
        rq->rq_disk = floppy->disk;
+       memcpy(rq->cmd, pc->c, 12);
        ide_do_drive_cmd(drive, rq);
 }
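
The one-line addition above (repeated below in idefloppy_create_rw_cmd() and idefloppy_queue_pc_tail()) copies the 12-byte ATAPI packet into the block request's own cmd[] array, so the request carries its CDB rather than leaving it reachable only through the driver-private pc. A minimal illustration with stand-in struct names:

    #include <stdio.h>
    #include <string.h>

    #define BLK_MAX_CDB 16

    struct toy_pc      { unsigned char c[12]; };
    struct toy_request { unsigned char cmd[BLK_MAX_CDB]; };

    int main(void)
    {
        struct toy_pc pc = { { 0x1b, 0, 0, 0, 0x01 } }; /* START STOP UNIT */
        struct toy_request rq = { { 0 } };

        memcpy(rq.cmd, pc.c, sizeof(pc.c)); /* mirrors the added line */
        printf("opcode 0x%02x\n", rq.cmd[0]);
        return 0;
    }
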
 
@@ -354,7 +339,6 @@ static void idefloppy_init_pc(struct ide_atapi_pc *pc)
        memset(pc, 0, sizeof(*pc));
        pc->buf = pc->pc_buf;
        pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
-       pc->callback = ide_floppy_callback;
 }
 
 static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -402,7 +386,7 @@ static int idefloppy_transfer_pc(ide_drive_t *drive)
        idefloppy_floppy_t *floppy = drive->driver_data;
 
        /* Send the actual packet */
-       drive->hwif->output_data(drive, NULL, floppy->pc->c, 12);
+       drive->hwif->tp_ops->output_data(drive, NULL, floppy->pc->c, 12);
 
        /* Timeout for the packet command */
        return IDEFLOPPY_WAIT_CMD;
@@ -429,7 +413,7 @@ static ide_startstop_t idefloppy_start_pc_transfer(ide_drive_t *drive)
         * 40 and 50 msec work well. idefloppy_pc_intr will not actually be
         * used until after the packet has been moved, in about 50 msec.
         */
-       if (pc->flags & PC_FLAG_ZIP_DRIVE) {
+       if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
                timeout = floppy->ticks;
                expiry = &idefloppy_transfer_pc;
        } else {
@@ -474,7 +458,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
                pc->error = IDEFLOPPY_ERROR_GENERAL;
 
                floppy->failed_pc = NULL;
-               pc->callback(drive);
+               drive->pc_callback(drive);
                return ide_stopped;
        }
 
@@ -574,6 +558,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
        put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
        put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
 
+       memcpy(rq->cmd, pc->c, 12);
+
        pc->rq = rq;
        pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
        if (rq->cmd_flags & REQ_RW)
@@ -647,12 +633,6 @@ static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
                return ide_stopped;
        }
 
-       if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT)
-               pc->flags |= PC_FLAG_DRQ_INTERRUPT;
-
-       if (floppy->flags & IDEFLOPPY_FLAG_ZIP_DRIVE)
-               pc->flags |= PC_FLAG_ZIP_DRIVE;
-
        pc->rq = rq;
 
        return idefloppy_issue_pc(drive, pc);
@@ -671,6 +651,7 @@ static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->buffer = (char *) pc;
        rq->cmd_type = REQ_TYPE_SPECIAL;
+       memcpy(rq->cmd, pc->c, 12);
        error = blk_execute_rq(drive->queue, floppy->disk, rq, 0);
        blk_put_request(rq);
 
@@ -795,7 +776,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
                switch (pc.buf[desc_start + 4] & 0x03) {
                /* Clik! drive returns this instead of CAPACITY_CURRENT */
                case CAPACITY_UNFORMATTED:
-                       if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE))
+                       if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
                                /*
                                 * If it is not a clik drive, break out
                                 * (maintains previous driver behaviour)
@@ -841,7 +822,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
        }
 
        /* Clik! disk does not support get_flexible_disk_page */
-       if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE))
+       if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
                (void) ide_floppy_get_flexible_disk_page(drive);
 
        set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor);
@@ -949,11 +930,12 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
 
                /* Else assume format_unit has finished, and we're at 0x10000 */
        } else {
+               ide_hwif_t *hwif = drive->hwif;
                unsigned long flags;
                u8 stat;
 
                local_irq_save(flags);
-               stat = ide_read_status(drive);
+               stat = hwif->tp_ops->read_status(hwif);
                local_irq_restore(flags);
 
                progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
@@ -1039,9 +1021,10 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
 
        *((u16 *) &gcw) = drive->id->config;
        floppy->pc = floppy->pc_stack;
+       drive->pc_callback = ide_floppy_callback;
 
        if (((gcw[0] & 0x60) >> 5) == 1)
-               floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT;
+               drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
        /*
         * We used to check revisions here. At this point however I'm giving up.
         * Just assume they are all broken, it's easier.
@@ -1052,7 +1035,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
         * we'll leave the limitation below for the 2.2.x tree.
         */
        if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) {
-               floppy->flags |= IDEFLOPPY_FLAG_ZIP_DRIVE;
+               drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
                /* This value will be visible in the /proc/ide/hdx/settings */
                floppy->ticks = IDEFLOPPY_TICKS_DELAY;
                blk_queue_max_sectors(drive->queue, 64);
@@ -1064,7 +1047,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
         */
        if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) {
                blk_queue_max_sectors(drive->queue, 64);
-               floppy->flags |= IDEFLOPPY_FLAG_CLIK_DRIVE;
+               drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
        }
 
        (void) ide_floppy_get_capacity(drive);
@@ -1153,7 +1136,7 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
        floppy->openers++;
 
        if (floppy->openers == 1) {
-               floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+               drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
                /* Just in case */
 
                idefloppy_init_pc(&pc);
@@ -1180,14 +1163,14 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
                        ret = -EROFS;
                        goto out_put_floppy;
                }
-               floppy->flags |= IDEFLOPPY_FLAG_MEDIA_CHANGED;
+               drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
                /* IOMEGA Clik! drives do not support lock/unlock commands */
-               if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) {
+               if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
                        idefloppy_create_prevent_cmd(&pc, 1);
                        (void) idefloppy_queue_pc_tail(drive, &pc);
                }
                check_disk_change(inode->i_bdev);
-       } else if (floppy->flags & IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS) {
+       } else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) {
                ret = -EBUSY;
                goto out_put_floppy;
        }
@@ -1210,12 +1193,12 @@ static int idefloppy_release(struct inode *inode, struct file *filp)
 
        if (floppy->openers == 1) {
                /* IOMEGA Clik! drives do not support lock/unlock commands */
-               if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) {
+               if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
                        idefloppy_create_prevent_cmd(&pc, 0);
                        (void) idefloppy_queue_pc_tail(drive, &pc);
                }
 
-               floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+               drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
        }
 
        floppy->openers--;
@@ -1236,15 +1219,17 @@ static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return 0;
 }
 
-static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
-               struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd)
+static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
+                              unsigned long arg, unsigned int cmd)
 {
+       idefloppy_floppy_t *floppy = drive->driver_data;
+
        if (floppy->openers > 1)
                return -EBUSY;
 
        /* The IOMEGA Clik! Drive doesn't support this command -
         * no room for an eject mechanism */
-       if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) {
+       if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
                int prevent = arg ? 1 : 0;
 
                if (cmd == CDROMEJECT)
@@ -1265,16 +1250,17 @@ static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
 static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
                                  int __user *arg)
 {
-       int blocks, length, flags, err = 0;
        struct ide_atapi_pc pc;
+       ide_drive_t *drive = floppy->drive;
+       int blocks, length, flags, err = 0;
 
        if (floppy->openers > 1) {
                /* Don't format if someone is using the disk */
-               floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+               drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
                return -EBUSY;
        }
 
-       floppy->flags |= IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+       drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
 
        /*
         * Send ATAPI_FORMAT_UNIT to the drive.
@@ -1298,15 +1284,15 @@ static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
                goto out;
        }
 
-       (void) idefloppy_get_sfrp_bit(floppy->drive);
+       (void) idefloppy_get_sfrp_bit(drive);
        idefloppy_create_format_unit_cmd(&pc, blocks, length, flags);
 
-       if (idefloppy_queue_pc_tail(floppy->drive, &pc))
+       if (idefloppy_queue_pc_tail(drive, &pc))
                err = -EIO;
 
 out:
        if (err)
-               floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+               drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
        return err;
 }
 
@@ -1325,7 +1311,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
        case CDROMEJECT:
                /* fall through */
        case CDROM_LOCKDOOR:
-               return ide_floppy_lockdoor(floppy, &pc, arg, cmd);
+               return ide_floppy_lockdoor(drive, &pc, arg, cmd);
        case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
                return 0;
        case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
@@ -1366,8 +1352,8 @@ static int idefloppy_media_changed(struct gendisk *disk)
                drive->attach = 0;
                return 0;
        }
-       ret = !!(floppy->flags & IDEFLOPPY_FLAG_MEDIA_CHANGED);
-       floppy->flags &= ~IDEFLOPPY_FLAG_MEDIA_CHANGED;
+       ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);
+       drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
        return ret;
 }
 
index 2d92214096ab9fca7b1e5f85865757366a57a97d..31d98fec775f32e1732d13970a304fa7012a7e95 100644 (file)
@@ -28,29 +28,21 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
 
 static ssize_t store_add(struct class *cls, const char *buf, size_t n)
 {
-       ide_hwif_t *hwif;
        unsigned int base, ctl;
-       int irq;
-       hw_regs_t hw;
-       u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
+       int irq, rc;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
                return -EINVAL;
 
-       hwif = ide_find_port();
-       if (hwif == NULL)
-               return -ENOENT;
-
        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, base, ctl);
        hw.irq = irq;
        hw.chipset = ide_generic;
 
-       ide_init_port_hw(hwif, &hw);
-
-       idx[0] = hwif->index;
-
-       ide_device_add(idx, NULL);
+       rc = ide_host_add(NULL, hws, NULL);
+       if (rc)
+               return rc;
 
        return n;
 };
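
The reworked store_add() no longer hunts for a free hwif slot by hand; it fills a hw_regs_t and lets ide_host_add() allocate and register the port in one step. The accepted input is unchanged: I/O base and control-block address in hex, IRQ in decimal, colon-separated, so a write of "1f0:3f6:14" describes the classic primary port (the attribute's exact sysfs path is not shown in this hunk). The parsing, demonstrated stand-alone:

    #include <stdio.h>

    int main(void)
    {
        unsigned int base, ctl;
        int irq;

        if (sscanf("1f0:3f6:14", "%x:%x:%d", &base, &ctl, &irq) == 3)
            printf("base 0x%x ctl 0x%x irq %d\n", base, ctl, irq);
        return 0;
    }
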
@@ -90,18 +82,18 @@ static int __init ide_generic_sysfs_init(void)
 
 static int __init ide_generic_init(void)
 {
-       u8 idx[MAX_HWIFS];
-       int i;
+       hw_regs_t hw[MAX_HWIFS], *hws[MAX_HWIFS];
+       struct ide_host *host;
+       unsigned long io_addr;
+       int i, rc;
 
        printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module "
                         "parameter for probing all legacy ISA IDE ports\n");
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               ide_hwif_t *hwif;
-               unsigned long io_addr = ide_default_io_base(i);
-               hw_regs_t hw;
+               io_addr = ide_default_io_base(i);
 
-               idx[i] = 0xff;
+               hws[i] = NULL;
 
                if ((probe_mask & (1 << i)) && io_addr) {
                        if (!request_region(io_addr, 8, DRV_NAME)) {
@@ -119,33 +111,42 @@ static int __init ide_generic_init(void)
                                continue;
                        }
 
-                       /*
-                        * Skip probing if the corresponding
-                        * slot is already occupied.
-                        */
-                       hwif = ide_find_port();
-                       if (hwif == NULL || hwif->index != i) {
-                               idx[i] = 0xff;
-                               continue;
-                       }
-
-                       memset(&hw, 0, sizeof(hw));
-                       ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
-                       hw.irq = ide_default_irq(io_addr);
-                       hw.chipset = ide_generic;
-                       ide_init_port_hw(hwif, &hw);
+                       memset(&hw[i], 0, sizeof(hw[i]));
+                       ide_std_init_ports(&hw[i], io_addr, io_addr + 0x206);
+                       hw[i].irq = ide_default_irq(io_addr);
+                       hw[i].chipset = ide_generic;
 
-                       idx[i] = i;
+                       hws[i] = &hw[i];
                }
        }
 
-       ide_device_add_all(idx, NULL);
+       host = ide_host_alloc_all(NULL, hws);
+       if (host == NULL) {
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       rc = ide_host_register(host, NULL, hws);
+       if (rc)
+               goto err_free;
 
        if (ide_generic_sysfs_init())
                printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
                                         "class\n");
 
        return 0;
+err_free:
+       ide_host_free(host);
+err:
+       for (i = 0; i < MAX_HWIFS; i++) {
+               if (hws[i] == NULL)
+                       continue;
+
+               io_addr = hws[i]->io_ports.data_addr;
+               release_region(io_addr + 0x206, 1);
+               release_region(io_addr, 8);
+       }
+       return rc;
 }
 
 module_init(ide_generic_init);
index 661b75a89d4dc4d2227a09d3ce69819a6132e42e..a896a283f27fdd04d7e211c2c2d81b3924d8bd9f 100644 (file)
@@ -330,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
                        tf->error = err;
                        tf->status = stat;
 
-                       drive->hwif->tf_read(drive, task);
+                       drive->hwif->tp_ops->tf_read(drive, task);
 
                        if (task->tf_flags & IDE_TFLAG_DYN)
                                kfree(task);
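
From here on the ide-io.c changes are mechanical instances of one pattern: scattered per-hwif method pointers (tf_load, tf_read, input_data, output_data, the INB/OUTB wrappers behind helpers like ide_read_status()) are all reached through a single tp_ops method table. A reduced model of the shape, with stand-in bodies:

    #include <stdio.h>

    struct hwif;

    struct tp_ops {
        unsigned char (*read_status)(struct hwif *);
        void (*set_irq)(struct hwif *, int);
    };

    struct hwif {
        const struct tp_ops *tp_ops; /* one vtable per transport type */
        unsigned char status;
    };

    static unsigned char pio_read_status(struct hwif *h) { return h->status; }
    static void pio_set_irq(struct hwif *h, int on) { (void)h; (void)on; }

    static const struct tp_ops pio_tp_ops = {
        .read_status = pio_read_status,
        .set_irq     = pio_set_irq,
    };

    int main(void)
    {
        struct hwif hwif = { &pio_tp_ops, 0x50 };

        /* call sites now read hwif->tp_ops->read_status(hwif) */
        printf("status 0x%02x\n", hwif.tp_ops->read_status(&hwif));
        return 0;
    }

One subtlety the converted call sites preserve: read_status() targets the command-block status register and so acknowledges a pending IRQ, while read_altstatus() polls the control block without that side effect.
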
@@ -381,8 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
                if (err == ABRT_ERR) {
                        if (drive->select.b.lba &&
                            /* some newer drives don't support WIN_SPECIFY */
-                           hwif->INB(hwif->io_ports.command_addr) ==
-                               WIN_SPECIFY)
+                           hwif->tp_ops->read_status(hwif) == WIN_SPECIFY)
                                return ide_stopped;
                } else if ((err & BAD_CRC) == BAD_CRC) {
                        /* UDMA crc error, just retry the operation */
@@ -408,7 +407,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
                return ide_stopped;
        }
 
-       if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+       if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
                rq->errors |= ERROR_RESET;
 
        if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -435,10 +434,9 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
                /* add decoding error stuff */
        }
 
-       if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+       if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
                /* force an abort */
-               hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
-                              hwif->io_ports.command_addr);
+               hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
 
        if (rq->errors >= ERROR_MAX) {
                ide_kill_rq(drive, rq);
@@ -712,7 +710,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 #ifdef DEBUG
        printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
-       ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive));
+       ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
+                         ide_read_error(drive));
 
        return ide_stopped;
 }
@@ -747,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
                 * the bus may be broken enough to walk on our toes at this
                 * point.
                 */
+               ide_hwif_t *hwif = drive->hwif;
                int rc;
 #ifdef DEBUG_PM
                printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
 #endif
-               rc = ide_wait_not_busy(HWIF(drive), 35000);
+               rc = ide_wait_not_busy(hwif, 35000);
                if (rc)
                        printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
                SELECT_DRIVE(drive);
-               ide_set_irq(drive, 1);
-               rc = ide_wait_not_busy(HWIF(drive), 100000);
+               hwif->tp_ops->set_irq(hwif, 1);
+               rc = ide_wait_not_busy(hwif, 100000);
                if (rc)
                        printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
        }
@@ -1042,7 +1042,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
                         * quirk_list may not like intr setups/cleanups
                         */
                        if (drive->quirk_list != 1)
-                               ide_set_irq(drive, 0);
+                               hwif->tp_ops->set_irq(hwif, 0);
                }
                hwgroup->hwif = hwif;
                hwgroup->drive = drive;
@@ -1142,7 +1142,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
                printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
                (void)hwif->dma_ops->dma_end(drive);
                ret = ide_error(drive, "dma timeout error",
-                               ide_read_status(drive));
+                               hwif->tp_ops->read_status(hwif));
        } else {
                printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
                hwif->dma_ops->dma_timeout(drive);
@@ -1267,7 +1267,7 @@ void ide_timer_expiry (unsigned long data)
                                } else
                                        startstop =
                                        ide_error(drive, "irq timeout",
-                                                 ide_read_status(drive));
+                                                 hwif->tp_ops->read_status(hwif));
                        }
                        drive->service_time = jiffies - drive->service_start;
                        spin_lock_irq(&ide_lock);
@@ -1323,7 +1323,8 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
         */
        do {
                if (hwif->irq == irq) {
-                       stat = hwif->INB(hwif->io_ports.status_addr);
+                       stat = hwif->tp_ops->read_status(hwif);
+
                        if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
                                /* Try to not flood the console with msgs */
                                static unsigned long last_msgtime, count;
@@ -1413,7 +1414,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
                         * Whack the status register, just in case
                         * we have a leftover pending IRQ.
                         */
-                       (void) hwif->INB(hwif->io_ports.status_addr);
+                       (void)hwif->tp_ops->read_status(hwif);
 #endif /* CONFIG_BLK_DEV_IDEPCI */
                }
                spin_unlock_irqrestore(&ide_lock, flags);
@@ -1519,6 +1520,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
 
 void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
 {
+       ide_hwif_t *hwif = drive->hwif;
        ide_task_t task;
 
        memset(&task, 0, sizeof(task));
@@ -1529,9 +1531,9 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
        task.tf.lbah    = (bcount >> 8) & 0xff;
 
        ide_tf_dump(drive->name, &task.tf);
-       ide_set_irq(drive, 1);
+       hwif->tp_ops->set_irq(hwif, 1);
        SELECT_MASK(drive, 0);
-       drive->hwif->tf_load(drive, &task);
+       hwif->tp_ops->tf_load(drive, &task);
 }
 
 EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
@@ -1543,9 +1545,9 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
 
        while (len > 0) {
                if (write)
-                       hwif->output_data(drive, NULL, buf, min(4, len));
+                       hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
                else
-                       hwif->input_data(drive, NULL, buf, min(4, len));
+                       hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
                len -= 4;
        }
 }
index 44aaec256a30d94b3d07f8a8b39a4283873c60a5..07da5fb9eaff0d760e93c5736b1b3dedb19f58de 100644 (file)
@@ -42,18 +42,6 @@ static void ide_outb (u8 val, unsigned long port)
        outb(val, port);
 }
 
-static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
-{
-       outb(addr, port);
-}
-
-void default_hwif_iops (ide_hwif_t *hwif)
-{
-       hwif->OUTB      = ide_outb;
-       hwif->OUTBSYNC  = ide_outbsync;
-       hwif->INB       = ide_inb;
-}
-
 /*
  *     MMIO operations, typically used for SATA controllers
  */
@@ -68,31 +56,19 @@ static void ide_mm_outb (u8 value, unsigned long port)
        writeb(value, (void __iomem *) port);
 }
 
-static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
-{
-       writeb(value, (void __iomem *) port);
-}
-
-void default_hwif_mmiops (ide_hwif_t *hwif)
-{
-       hwif->OUTB      = ide_mm_outb;
-       /* Most systems will need to override OUTBSYNC, alas however
-          this one is controller specific! */
-       hwif->OUTBSYNC  = ide_mm_outbsync;
-       hwif->INB       = ide_mm_inb;
-}
-
-EXPORT_SYMBOL(default_hwif_mmiops);
-
 void SELECT_DRIVE (ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
+       ide_task_t task;
 
        if (port_ops && port_ops->selectproc)
                port_ops->selectproc(drive);
 
-       hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
+       memset(&task, 0, sizeof(task));
+       task.tf_flags = IDE_TFLAG_OUT_DEVICE;
+
+       drive->hwif->tp_ops->tf_load(drive, &task);
 }
 
 void SELECT_MASK(ide_drive_t *drive, int mask)
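
SELECT_DRIVE() loses its raw OUTB to the device register: selection is now expressed as a taskfile load with only IDE_TFLAG_OUT_DEVICE set, so it travels through the same tp_ops->tf_load() path as every other register write (ide_tf_load() below ORs drive->select.all into the device byte). A sketch of the flag-gated write, with illustrative flag values:

    #include <stdio.h>

    #define TFLAG_OUT_NSECT  (1u << 0)
    #define TFLAG_OUT_DEVICE (1u << 1)

    struct tf { unsigned char nsect, device; };

    static void tf_load(const struct tf *tf, unsigned flags)
    {
        /* each OUT flag gates exactly one register write */
        if (flags & TFLAG_OUT_NSECT)
            printf("write nsect  = 0x%02x\n", tf->nsect);
        if (flags & TFLAG_OUT_DEVICE)
            printf("write device = 0x%02x\n", tf->device);
    }

    int main(void)
    {
        struct tf tf = { .device = 0xa0 | (1 << 4) }; /* select unit 1 */

        tf_load(&tf, TFLAG_OUT_DEVICE); /* what SELECT_DRIVE() now does */
        return 0;
    }
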
@@ -103,7 +79,61 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
                port_ops->maskproc(drive, mask);
 }
 
-static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+       if (hwif->host_flags & IDE_HFLAG_MMIO)
+               writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+       else
+               outb(cmd, hwif->io_ports.command_addr);
+}
+EXPORT_SYMBOL_GPL(ide_exec_command);
+
+u8 ide_read_status(ide_hwif_t *hwif)
+{
+       if (hwif->host_flags & IDE_HFLAG_MMIO)
+               return readb((void __iomem *)hwif->io_ports.status_addr);
+       else
+               return inb(hwif->io_ports.status_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_status);
+
+u8 ide_read_altstatus(ide_hwif_t *hwif)
+{
+       if (hwif->host_flags & IDE_HFLAG_MMIO)
+               return readb((void __iomem *)hwif->io_ports.ctl_addr);
+       else
+               return inb(hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_altstatus);
+
+u8 ide_read_sff_dma_status(ide_hwif_t *hwif)
+{
+       if (hwif->host_flags & IDE_HFLAG_MMIO)
+               return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+       else
+               return inb(hwif->dma_base + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ide_read_sff_dma_status);
+
+void ide_set_irq(ide_hwif_t *hwif, int on)
+{
+       u8 ctl = ATA_DEVCTL_OBS;
+
+       if (on == 4) { /* hack for SRST */
+               ctl |= 4;
+               on &= ~4;
+       }
+
+       ctl |= on ? 0 : 2;
+
+       if (hwif->host_flags & IDE_HFLAG_MMIO)
+               writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+       else
+               outb(ctl, hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_set_irq);
+
+void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct ide_io_ports *io_ports = &hwif->io_ports;
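
Editor's note: ide_set_irq() builds the ATA device-control byte on top of the obsolete always-set ATA_DEVCTL_OBS base (0x08): bit 1 is nIEN (set to mask the drive interrupt) and bit 2 is SRST (soft reset), which the "on == 4" hack exposes for do_reset1() further down, pending a proper ->softreset method. A standalone userspace sketch of the encoding (the DEVCTL_* macro names here are illustrative):

    #include <stdio.h>

    #define DEVCTL_OBS      0x08    /* obsolete "always one" bit */
    #define DEVCTL_NIEN     0x02    /* 1 = mask the drive interrupt */
    #define DEVCTL_SRST     0x04    /* 1 = host soft reset */

    /* mirrors the ide_set_irq() logic above */
    static unsigned char devctl_byte(int on)
    {
            unsigned char ctl = DEVCTL_OBS;

            if (on == 4) {          /* SRST hack used by do_reset1() */
                    ctl |= DEVCTL_SRST;
                    on &= ~4;
            }
            ctl |= on ? 0 : DEVCTL_NIEN;
            return ctl;
    }

    int main(void)
    {
            printf("irq on:  0x%02x\n", devctl_byte(1));    /* 0x08 */
            printf("irq off: 0x%02x\n", devctl_byte(0));    /* 0x0a: nIEN set */
            printf("srst:    0x%02x\n", devctl_byte(4));    /* 0x0e: SRST + nIEN */
            return 0;
    }
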
@@ -155,8 +185,9 @@ static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
                tf_outb((tf->device & HIHI) | drive->select.all,
                         io_ports->device_addr);
 }
+EXPORT_SYMBOL_GPL(ide_tf_load);
 
-static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -188,6 +219,8 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
        /* be sure we're looking at the low order bits */
        tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
 
+       if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+               tf->feature = tf_inb(io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_IN_NSECT)
                tf->nsect  = tf_inb(io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -214,6 +247,7 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
                        tf->hob_lbah    = tf_inb(io_ports->lbah_addr);
        }
 }
+EXPORT_SYMBOL_GPL(ide_tf_read);
 
 /*
  * Some localbus EIDE interfaces require a special access sequence
@@ -236,8 +270,8 @@ static void ata_vlb_sync(unsigned long port)
  * so if an odd len is specified, be sure that there's at least one
  * extra byte allocated for the buffer.
  */
-static void ata_input_data(ide_drive_t *drive, struct request *rq,
-                          void *buf, unsigned int len)
+void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
+                   unsigned int len)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -277,12 +311,13 @@ static void ata_input_data(ide_drive_t *drive, struct request *rq,
                        insw(data_addr, buf, len / 2);
        }
 }
+EXPORT_SYMBOL_GPL(ide_input_data);
 
 /*
  * This is used for most PIO data transfers *to* the IDE interface
  */
-static void ata_output_data(ide_drive_t *drive, struct request *rq,
-                           void *buf, unsigned int len)
+void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
+                    unsigned int len)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -320,15 +355,50 @@ static void ata_output_data(ide_drive_t *drive, struct request *rq,
                        outsw(data_addr, buf, len / 2);
        }
 }
+EXPORT_SYMBOL_GPL(ide_output_data);
+
+u8 ide_read_error(ide_drive_t *drive)
+{
+       ide_task_t task;
+
+       memset(&task, 0, sizeof(task));
+       task.tf_flags = IDE_TFLAG_IN_FEATURE;
+
+       drive->hwif->tp_ops->tf_read(drive, &task);
+
+       return task.tf.error;
+}
+EXPORT_SYMBOL_GPL(ide_read_error);
 
-void default_hwif_transport(ide_hwif_t *hwif)
+void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
 {
-       hwif->tf_load     = ide_tf_load;
-       hwif->tf_read     = ide_tf_read;
+       ide_task_t task;
+
+       memset(&task, 0, sizeof(task));
+       task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
+                       IDE_TFLAG_IN_NSECT;
 
-       hwif->input_data  = ata_input_data;
-       hwif->output_data = ata_output_data;
+       drive->hwif->tp_ops->tf_read(drive, &task);
+
+       *bcount = (task.tf.lbah << 8) | task.tf.lbam;
+       *ireason = task.tf.nsect & 3;
 }
+EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
+
+const struct ide_tp_ops default_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = ide_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = ide_set_irq,
+
+       .tf_load                = ide_tf_load,
+       .tf_read                = ide_tf_read,
+
+       .input_data             = ide_input_data,
+       .output_data            = ide_output_data,
+};
 
 void ide_fix_driveid (struct hd_driveid *id)
 {
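
Editor's note: with the transport methods collected into one const table, a host driver that needs to special-case a single operation can clone default_tp_ops, override just that slot, and point its ide_port_info at the clone; ide_init_port() in ide-probe.c below copies d->tp_ops into the hwif. A hypothetical sketch (the mydrv_* names are illustrative, not part of this commit; the scc_pata and ns87415 drivers in this series are converted along these lines):

    /* override only ->tf_load, keep the exported generic helpers for the rest */
    static void mydrv_tf_load(ide_drive_t *drive, ide_task_t *task)
    {
            /* a controller-specific quirk would go here ... */
            ide_tf_load(drive, task);       /* then defer to the default */
    }

    static const struct ide_tp_ops mydrv_tp_ops = {
            .exec_command           = ide_exec_command,
            .read_status            = ide_read_status,
            .read_altstatus         = ide_read_altstatus,
            .read_sff_dma_status    = ide_read_sff_dma_status,
            .set_irq                = ide_set_irq,
            .tf_load                = mydrv_tf_load,        /* the one override */
            .tf_read                = ide_tf_read,
            .input_data             = ide_input_data,
            .output_data            = ide_output_data,
    };

    static const struct ide_port_info mydrv_port_info = {
            .name   = "mydrv",
            .tp_ops = &mydrv_tp_ops,
    };
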
@@ -483,10 +553,10 @@ int drive_is_ready (ide_drive_t *drive)
         * about possible isa-pnp and pci-pnp issues yet.
         */
        if (hwif->io_ports.ctl_addr)
-               stat = ide_read_altstatus(drive);
+               stat = hwif->tp_ops->read_altstatus(hwif);
        else
                /* Note: this may clear a pending IRQ!! */
-               stat = ide_read_status(drive);
+               stat = hwif->tp_ops->read_status(hwif);
 
        if (stat & BUSY_STAT)
                /* drive busy:  definitely not interrupting */
@@ -511,24 +581,26 @@ EXPORT_SYMBOL(drive_is_ready);
  */
 static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
 {
+       ide_hwif_t *hwif = drive->hwif;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        unsigned long flags;
        int i;
        u8 stat;
 
        udelay(1);      /* spec allows drive 400ns to assert "BUSY" */
-       stat = ide_read_status(drive);
+       stat = tp_ops->read_status(hwif);
 
        if (stat & BUSY_STAT) {
                local_irq_set(flags);
                timeout += jiffies;
-               while ((stat = ide_read_status(drive)) & BUSY_STAT) {
+               while ((stat = tp_ops->read_status(hwif)) & BUSY_STAT) {
                        if (time_after(jiffies, timeout)) {
                                /*
                                 * One last read after the timeout in case
                                 * heavy interrupt load made us not make any
                                 * progress during the timeout..
                                 */
-                               stat = ide_read_status(drive);
+                               stat = tp_ops->read_status(hwif);
                                if (!(stat & BUSY_STAT))
                                        break;
 
@@ -548,7 +620,7 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
         */
        for (i = 0; i < 10; i++) {
                udelay(1);
-               stat = ide_read_status(drive);
+               stat = tp_ops->read_status(hwif);
 
                if (OK_STAT(stat, good, bad)) {
                        *rstat = stat;
@@ -674,6 +746,7 @@ no_80w:
 int ide_driveid_update(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        struct hd_driveid *id;
        unsigned long timeout, flags;
        u8 stat;
@@ -684,9 +757,9 @@ int ide_driveid_update(ide_drive_t *drive)
         */
 
        SELECT_MASK(drive, 1);
-       ide_set_irq(drive, 0);
+       tp_ops->set_irq(hwif, 0);
        msleep(50);
-       hwif->OUTBSYNC(hwif, WIN_IDENTIFY, hwif->io_ports.command_addr);
+       tp_ops->exec_command(hwif, WIN_IDENTIFY);
        timeout = jiffies + WAIT_WORSTCASE;
        do {
                if (time_after(jiffies, timeout)) {
@@ -695,11 +768,11 @@ int ide_driveid_update(ide_drive_t *drive)
                }
 
                msleep(50);     /* give drive a breather */
-               stat = ide_read_altstatus(drive);
+               stat = tp_ops->read_altstatus(hwif);
        } while (stat & BUSY_STAT);
 
        msleep(50);     /* wait for IRQ and DRQ_STAT */
-       stat = ide_read_status(drive);
+       stat = tp_ops->read_status(hwif);
 
        if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
                SELECT_MASK(drive, 0);
@@ -713,8 +786,8 @@ int ide_driveid_update(ide_drive_t *drive)
                local_irq_restore(flags);
                return 0;
        }
-       hwif->input_data(drive, NULL, id, SECTOR_SIZE);
-       (void)ide_read_status(drive);   /* clear drive IRQ */
+       tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+       (void)tp_ops->read_status(hwif);        /* clear drive IRQ */
        local_irq_enable();
        local_irq_restore(flags);
        ide_fix_driveid(id);
@@ -735,9 +808,10 @@ int ide_driveid_update(ide_drive_t *drive)
 int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
 {
        ide_hwif_t *hwif = drive->hwif;
-       struct ide_io_ports *io_ports = &hwif->io_ports;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        int error = 0;
        u8 stat;
+       ide_task_t task;
 
 #ifdef CONFIG_BLK_DEV_IDEDMA
        if (hwif->dma_ops)      /* check if host supports DMA */
@@ -770,12 +844,19 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
        SELECT_DRIVE(drive);
        SELECT_MASK(drive, 0);
        udelay(1);
-       ide_set_irq(drive, 0);
-       hwif->OUTB(speed, io_ports->nsect_addr);
-       hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
-       hwif->OUTBSYNC(hwif, WIN_SETFEATURES, io_ports->command_addr);
+       tp_ops->set_irq(hwif, 0);
+
+       memset(&task, 0, sizeof(task));
+       task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
+       task.tf.feature = SETFEATURES_XFER;
+       task.tf.nsect   = speed;
+
+       tp_ops->tf_load(drive, &task);
+
+       tp_ops->exec_command(hwif, WIN_SETFEATURES);
+
        if (drive->quirk_list == 2)
-               ide_set_irq(drive, 1);
+               tp_ops->set_irq(hwif, 1);
 
        error = __ide_wait_stat(drive, drive->ready_stat,
                                BUSY_STAT|DRQ_STAT|ERR_STAT,
@@ -796,8 +877,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
 
  skip:
 #ifdef CONFIG_BLK_DEV_IDEDMA
-       if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
-           drive->using_dma)
+       if (speed >= XFER_SW_DMA_0 && drive->using_dma)
                hwif->dma_ops->dma_host_set(drive, 1);
        else if (hwif->dma_ops) /* check if host supports DMA */
                ide_dma_off_quietly(drive);
@@ -881,7 +961,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
 
        spin_lock_irqsave(&ide_lock, flags);
        __ide_set_handler(drive, handler, timeout, expiry);
-       hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr);
+       hwif->tp_ops->exec_command(hwif, cmd);
        /*
         * Drive takes 400nS to respond, we must avoid the IRQ being
         * serviced before that.
@@ -899,7 +979,7 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
        unsigned long flags;
 
        spin_lock_irqsave(&ide_lock, flags);
-       hwif->OUTBSYNC(hwif, WIN_PACKETCMD, hwif->io_ports.command_addr);
+       hwif->tp_ops->exec_command(hwif, WIN_PACKETCMD);
        ndelay(400);
        spin_unlock_irqrestore(&ide_lock, flags);
 }
@@ -924,12 +1004,13 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
  */
 static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
 {
-       ide_hwgroup_t *hwgroup  = HWGROUP(drive);
+       ide_hwif_t *hwif = drive->hwif;
+       ide_hwgroup_t *hwgroup = hwif->hwgroup;
        u8 stat;
 
        SELECT_DRIVE(drive);
        udelay (10);
-       stat = ide_read_status(drive);
+       stat = hwif->tp_ops->read_status(hwif);
 
        if (OK_STAT(stat, 0, BUSY_STAT))
                printk("%s: ATAPI reset complete\n", drive->name);
@@ -975,7 +1056,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
                }
        }
 
-       tmp = ide_read_status(drive);
+       tmp = hwif->tp_ops->read_status(hwif);
 
        if (!OK_STAT(tmp, 0, BUSY_STAT)) {
                if (time_before(jiffies, hwgroup->poll_timeout)) {
@@ -1089,8 +1170,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
        ide_hwif_t *hwif;
        ide_hwgroup_t *hwgroup;
        struct ide_io_ports *io_ports;
+       const struct ide_tp_ops *tp_ops;
        const struct ide_port_ops *port_ops;
-       u8 ctl;
 
        spin_lock_irqsave(&ide_lock, flags);
        hwif = HWIF(drive);
@@ -1098,6 +1179,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
 
        io_ports = &hwif->io_ports;
 
+       tp_ops = hwif->tp_ops;
+
        /* We must not reset with running handlers */
        BUG_ON(hwgroup->handler != NULL);
 
@@ -1106,7 +1189,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
                pre_reset(drive);
                SELECT_DRIVE(drive);
                udelay (20);
-               hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr);
+               tp_ops->exec_command(hwif, WIN_SRST);
                ndelay(400);
                hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
                hwgroup->polling = 1;
@@ -1135,16 +1218,15 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
         * immediate interrupt due to the edge transition it produces.
         * This single interrupt gives us a "fast poll" for drives that
         * recover from reset very quickly, saving us the first 50ms wait time.
+        *
+        * TODO: add ->softreset method and stop abusing ->set_irq
         */
        /* set SRST and nIEN */
-       hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr);
+       tp_ops->set_irq(hwif, 4);
        /* more than enough time */
        udelay(10);
-       if (drive->quirk_list == 2)
-               ctl = ATA_DEVCTL_OBS;           /* clear SRST and nIEN */
-       else
-               ctl = ATA_DEVCTL_OBS | 2;       /* clear SRST, leave nIEN */
-       hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
+       /* clear SRST, leave nIEN (unless device is on the quirk list) */
+       tp_ops->set_irq(hwif, drive->quirk_list == 2);
        /* more than enough time */
        udelay(10);
        hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1189,7 +1271,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
                 * about locking issues (2.5 work ?).
                 */
                mdelay(1);
-               stat = hwif->INB(hwif->io_ports.status_addr);
+               stat = hwif->tp_ops->read_status(hwif);
                if ((stat & BUSY_STAT) == 0)
                        return 0;
                /*
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 13af72f09ec499571d0125ce3784cc8b26baabdd..97fefabea8b8daee5203b42114fc9c5ba4933dbd 100644
@@ -266,22 +266,11 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
 
        rate = ide_rate_filter(drive, rate);
 
+       BUG_ON(rate < XFER_PIO_0);
+
        if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
                return ide_set_pio_mode(drive, rate);
 
-       /*
-        * TODO: transfer modes 0x00-0x07 passed from the user-space are
-        * currently handled here which needs fixing (please note that such
-        * case could happen iff the transfer mode has already been set on
-        * the device by ide-proc.c::set_xfer_rate()).
-        */
-       if (rate < XFER_PIO_0) {
-               if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
-                       return ide_set_dma_mode(drive, rate);
-               else
-                       return ide_config_drive_speed(drive, rate);
-       }
-
        return ide_set_dma_mode(drive, rate);
 }
 
@@ -336,7 +325,7 @@ static void ide_dump_sector(ide_drive_t *drive)
        else
                task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
 
-       drive->hwif->tf_read(drive, &task);
+       drive->hwif->tp_ops->tf_read(drive, &task);
 
        if (lba48 || (tf->device & ATA_LBA))
                printk(", LBAsect=%llu",
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 03f2ef5470a3756cfac07a093110ef4dd234a9e0..bac9b392b68967dfde75c6344cc8af9c6277e79d 100644
@@ -29,9 +29,10 @@ static struct pnp_device_id idepnp_devices[] = {
 
 static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
 {
-       hw_regs_t hw;
-       ide_hwif_t *hwif;
+       struct ide_host *host;
        unsigned long base, ctl;
+       int rc;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
 
@@ -59,31 +60,25 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
        hw.irq = pnp_irq(dev, 0);
        hw.chipset = ide_generic;
 
-       hwif = ide_find_port();
-       if (hwif) {
-               u8 index = hwif->index;
-               u8 idx[4] = { index, 0xff, 0xff, 0xff };
+       rc = ide_host_add(NULL, hws, &host);
+       if (rc)
+               goto out;
 
-               ide_init_port_hw(hwif, &hw);
-
-               pnp_set_drvdata(dev, hwif);
-
-               ide_device_add(idx, NULL);
-
-               return 0;
-       }
+       pnp_set_drvdata(dev, host);
 
+       return 0;
+out:
        release_region(ctl, 1);
        release_region(base, 8);
 
-       return -1;
+       return rc;
 }
 
 static void idepnp_remove(struct pnp_dev *dev)
 {
-       ide_hwif_t *hwif = pnp_get_drvdata(dev);
+       struct ide_host *host = pnp_get_drvdata(dev);
 
-       ide_unregister(hwif);
+       ide_host_remove(host);
 
        release_region(pnp_port_start(dev, 1), 1);
        release_region(pnp_port_start(dev, 0), 8);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 235ebdb29b28a75d76ef3eaac651216c98c382ce..4aa76c45375562450dfe836d955dcddf9e27e799 100644
@@ -39,8 +39,6 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
-
 /**
  *     generic_id              -       add a generic drive id
  *     @drive: drive to make an ID block for
@@ -126,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
 
        id = drive->id;
        /* read 512 bytes of id info */
-       hwif->input_data(drive, NULL, id, SECTOR_SIZE);
+       hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
 
        drive->id_read = 1;
        local_irq_enable();
@@ -267,6 +265,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
 {
        ide_hwif_t *hwif = HWIF(drive);
        struct ide_io_ports *io_ports = &hwif->io_ports;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        int use_altstatus = 0, rc;
        unsigned long timeout;
        u8 s = 0, a = 0;
@@ -275,8 +274,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
        msleep(50);
 
        if (io_ports->ctl_addr) {
-               a = ide_read_altstatus(drive);
-               s = ide_read_status(drive);
+               a = tp_ops->read_altstatus(hwif);
+               s = tp_ops->read_status(hwif);
                if ((a ^ s) & ~INDEX_STAT)
                        /* ancient Seagate drives, broken interfaces */
                        printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
@@ -290,12 +289,18 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
        /* set features register for atapi
         * identify command to be sure of reply
         */
-       if ((cmd == WIN_PIDENTIFY))
-               /* disable dma & overlap */
-               hwif->OUTB(0, io_ports->feature_addr);
+       if (cmd == WIN_PIDENTIFY) {
+               ide_task_t task;
+
+               memset(&task, 0, sizeof(task));
+               /* disable DMA & overlap */
+               task.tf_flags = IDE_TFLAG_OUT_FEATURE;
+
+               tp_ops->tf_load(drive, &task);
+       }
 
        /* ask drive for ID */
-       hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr);
+       tp_ops->exec_command(hwif, cmd);
 
        timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
        timeout += jiffies;
@@ -306,13 +311,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
                }
                /* give drive a breather */
                msleep(50);
-               s = use_altstatus ? ide_read_altstatus(drive)
-                                 : ide_read_status(drive);
+               s = use_altstatus ? tp_ops->read_altstatus(hwif)
+                                 : tp_ops->read_status(hwif);
        } while (s & BUSY_STAT);
 
        /* wait for IRQ and DRQ_STAT */
        msleep(50);
-       s = ide_read_status(drive);
+       s = tp_ops->read_status(hwif);
 
        if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
                unsigned long flags;
@@ -324,7 +329,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
                /* drive responded with ID */
                rc = 0;
                /* clear drive IRQ */
-               (void)ide_read_status(drive);
+               (void)tp_ops->read_status(hwif);
                local_irq_restore(flags);
        } else {
                /* drive refused ID */
@@ -346,6 +351,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
 static int try_to_identify (ide_drive_t *drive, u8 cmd)
 {
        ide_hwif_t *hwif = HWIF(drive);
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        int retval;
        int autoprobe = 0;
        unsigned long cookie = 0;
@@ -361,7 +367,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
                        autoprobe = 1;
                        cookie = probe_irq_on();
                }
-               ide_set_irq(drive, autoprobe);
+               tp_ops->set_irq(hwif, autoprobe);
        }
 
        retval = actual_try_to_identify(drive, cmd);
@@ -369,9 +375,9 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
        if (autoprobe) {
                int irq;
 
-               ide_set_irq(drive, 0);
+               tp_ops->set_irq(hwif, 0);
                /* clear drive IRQ */
-               (void)ide_read_status(drive);
+               (void)tp_ops->read_status(hwif);
                udelay(5);
                irq = probe_irq_off(cookie);
                if (!hwif->irq) {
@@ -396,7 +402,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
 
        do {
                msleep(50);
-               stat = hwif->INB(hwif->io_ports.status_addr);
+               stat = hwif->tp_ops->read_status(hwif);
                if ((stat & BUSY_STAT) == 0)
                        return 0;
        } while (time_before(jiffies, timeout));
@@ -404,6 +410,18 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
        return 1;
 }
 
+static u8 ide_read_device(ide_drive_t *drive)
+{
+       ide_task_t task;
+
+       memset(&task, 0, sizeof(task));
+       task.tf_flags = IDE_TFLAG_IN_DEVICE;
+
+       drive->hwif->tp_ops->tf_read(drive, &task);
+
+       return task.tf.device;
+}
+
 /**
  *     do_probe                -       probe an IDE device
  *     @drive: drive to probe
@@ -428,7 +446,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
 static int do_probe (ide_drive_t *drive, u8 cmd)
 {
        ide_hwif_t *hwif = HWIF(drive);
-       struct ide_io_ports *io_ports = &hwif->io_ports;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        int rc;
        u8 stat;
 
@@ -449,8 +467,8 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
        msleep(50);
        SELECT_DRIVE(drive);
        msleep(50);
-       if (hwif->INB(io_ports->device_addr) != drive->select.all &&
-           !drive->present) {
+
+       if (ide_read_device(drive) != drive->select.all && !drive->present) {
                if (drive->select.b.unit != 0) {
                        /* exit with drive0 selected */
                        SELECT_DRIVE(&hwif->drives[0]);
@@ -461,7 +479,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
                return 3;
        }
 
-       stat = ide_read_status(drive);
+       stat = tp_ops->read_status(hwif);
 
        if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
            drive->present || cmd == WIN_PIDENTIFY) {
@@ -471,7 +489,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
                        rc = try_to_identify(drive,cmd);
                }
 
-               stat = ide_read_status(drive);
+               stat = tp_ops->read_status(hwif);
 
                if (stat == (BUSY_STAT | READY_STAT))
                        return 4;
@@ -482,13 +500,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
                        msleep(50);
                        SELECT_DRIVE(drive);
                        msleep(50);
-                       hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr);
+                       tp_ops->exec_command(hwif, WIN_SRST);
                        (void)ide_busy_sleep(hwif);
                        rc = try_to_identify(drive, cmd);
                }
 
                /* ensure drive IRQ is clear */
-               stat = ide_read_status(drive);
+               stat = tp_ops->read_status(hwif);
 
                if (rc == 1)
                        printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -502,7 +520,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
                SELECT_DRIVE(&hwif->drives[0]);
                msleep(50);
                /* ensure drive irq is clear */
-               (void)ide_read_status(drive);
+               (void)tp_ops->read_status(hwif);
        }
        return rc;
 }
@@ -513,12 +531,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
 static void enable_nest (ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        u8 stat;
 
        printk("%s: enabling %s -- ", hwif->name, drive->id->model);
        SELECT_DRIVE(drive);
        msleep(50);
-       hwif->OUTBSYNC(hwif, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
+       tp_ops->exec_command(hwif, EXABYTE_ENABLE_NEST);
 
        if (ide_busy_sleep(hwif)) {
                printk(KERN_CONT "failed (timeout)\n");
@@ -527,7 +546,7 @@ static void enable_nest (ide_drive_t *drive)
 
        msleep(50);
 
-       stat = ide_read_status(drive);
+       stat = tp_ops->read_status(hwif);
 
        if (!OK_STAT(stat, 0, BAD_STAT))
                printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -619,7 +638,7 @@ static inline u8 probe_for_drive (ide_drive_t *drive)
        return drive->present;
 }
 
-static void hwif_release_dev (struct device *dev)
+static void hwif_release_dev(struct device *dev)
 {
        ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
 
@@ -709,7 +728,7 @@ static int ide_port_wait_ready(ide_hwif_t *hwif)
                /* Ignore disks that we will not probe for later. */
                if (!drive->noprobe || drive->present) {
                        SELECT_DRIVE(drive);
-                       ide_set_irq(drive, 1);
+                       hwif->tp_ops->set_irq(hwif, 1);
                        mdelay(2);
                        rc = ide_wait_not_busy(hwif, 35000);
                        if (rc)
@@ -971,6 +990,45 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
        mutex_unlock(&ide_cfg_mtx);
 }
 
+static ide_hwif_t *ide_ports[MAX_HWIFS];
+
+void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
+{
+       ide_hwgroup_t *hwgroup = hwif->hwgroup;
+
+       ide_ports[hwif->index] = NULL;
+
+       spin_lock_irq(&ide_lock);
+       /*
+        * Remove us from the hwgroup, and free
+        * the hwgroup if we were the only member
+        */
+       if (hwif->next == hwif) {
+               BUG_ON(hwgroup->hwif != hwif);
+               kfree(hwgroup);
+       } else {
+               /* There is another interface in hwgroup.
+                * Unlink us, and set hwgroup->drive and ->hwif to
+                * something sane.
+                */
+               ide_hwif_t *g = hwgroup->hwif;
+
+               while (g->next != hwif)
+                       g = g->next;
+               g->next = hwif->next;
+               if (hwgroup->hwif == hwif) {
+                       /* Choose a random hwif for hwgroup->hwif.
+                        * It's guaranteed that there are no drives
+                        * left in the hwgroup.
+                        */
+                       BUG_ON(hwgroup->drive != NULL);
+                       hwgroup->hwif = g;
+               }
+               BUG_ON(hwgroup->hwif == hwif);
+       }
+       spin_unlock_irq(&ide_lock);
+}
+
 /*
  * This routine sets up the irq for an ide interface, and creates a new
  * hwgroup for the irq/hwif if none was previously assigned.
@@ -998,8 +1056,9 @@ static int init_irq (ide_hwif_t *hwif)
         * Group up with any other hwifs that share our irq(s).
         */
        for (index = 0; index < MAX_HWIFS; index++) {
-               ide_hwif_t *h = &ide_hwifs[index];
-               if (h->hwgroup) {  /* scan only initialized hwif's */
+               ide_hwif_t *h = ide_ports[index];
+
+               if (h && h->hwgroup) {  /* scan only initialized ports */
                        if (hwif->irq == h->irq) {
                                hwif->sharing_irq = h->sharing_irq = 1;
                                if (hwif->chipset != ide_pci ||
@@ -1053,6 +1112,8 @@ static int init_irq (ide_hwif_t *hwif)
                hwgroup->timer.data = (unsigned long) hwgroup;
        }
 
+       ide_ports[hwif->index] = hwif;
+
        /*
         * Allocate the irq, if not already obtained for another hwif
         */
@@ -1066,8 +1127,7 @@ static int init_irq (ide_hwif_t *hwif)
                        sa = IRQF_SHARED;
 
                if (io_ports->ctl_addr)
-                       /* clear nIEN */
-                       hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr);
+                       hwif->tp_ops->set_irq(hwif, 1);
 
                if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
                        goto out_unlink;
@@ -1345,6 +1405,9 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
        hwif->host_flags |= d->host_flags;
        hwif->pio_mask = d->pio_mask;
 
+       if (d->tp_ops)
+               hwif->tp_ops = d->tp_ops;
+
        /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
        if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
                hwif->port_ops = d->port_ops;
@@ -1363,6 +1426,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
 
                if (rc < 0) {
                        printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
+                       hwif->dma_base = 0;
                        hwif->swdma_mask = 0;
                        hwif->mwdma_mask = 0;
                        hwif->ultra_mask = 0;
@@ -1446,18 +1510,20 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
        return rc;
 }
 
+static unsigned int ide_indexes;
+
 /**
- *     ide_find_port_slot      -       find free ide_hwifs[] slot
+ *     ide_find_port_slot      -       find free port slot
  *     @d: IDE port info
  *
- *     Return the new hwif.  If we are out of free slots return NULL.
+ *     Return the new port slot index or -ENOENT if we are out of free slots.
  */
 
-ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
+static int ide_find_port_slot(const struct ide_port_info *d)
 {
-       ide_hwif_t *hwif;
-       int i;
+       int idx = -ENOENT;
        u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
+       u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
 
        /*
         * Claim an unassigned slot.
@@ -1469,51 +1535,106 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
         * Unless there is a bootable card that does not use the standard
         * ports 0x1f0/0x170 (the ide0/ide1 defaults).
         */
-       if (bootable) {
-               i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
-
-               for (; i < MAX_HWIFS; i++) {
-                       hwif = &ide_hwifs[i];
-                       if (hwif->chipset == ide_unknown)
-                               goto out_found;
-               }
+       mutex_lock(&ide_cfg_mtx);
+       if (MAX_HWIFS == 1) {
+               if (ide_indexes == 0 && i == 0)
+                       idx = 0;
        } else {
-               for (i = 2; i < MAX_HWIFS; i++) {
-                       hwif = &ide_hwifs[i];
-                       if (hwif->chipset == ide_unknown)
-                               goto out_found;
+               if (bootable) {
+                       if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
+                               idx = ffz(ide_indexes | i);
+               } else {
+                       if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
+                               idx = ffz(ide_indexes | 3);
+                       else if ((ide_indexes & 3) != 3)
+                               idx = ffz(ide_indexes);
                }
-               for (i = 0; i < 2 && i < MAX_HWIFS; i++) {
-                       hwif = &ide_hwifs[i];
-                       if (hwif->chipset == ide_unknown)
-                               goto out_found;
+       }
+       if (idx >= 0)
+               ide_indexes |= (1 << idx);
+       mutex_unlock(&ide_cfg_mtx);
+
+       return idx;
+}
+
+static void ide_free_port_slot(int idx)
+{
+       mutex_lock(&ide_cfg_mtx);
+       ide_indexes &= ~(1 << idx);
+       mutex_unlock(&ide_cfg_mtx);
+}
+
+struct ide_host *ide_host_alloc_all(const struct ide_port_info *d,
+                                   hw_regs_t **hws)
+{
+       struct ide_host *host;
+       int i;
+
+       host = kzalloc(sizeof(*host), GFP_KERNEL);
+       if (host == NULL)
+               return NULL;
+
+       for (i = 0; i < MAX_HWIFS; i++) {
+               ide_hwif_t *hwif;
+               int idx;
+
+               if (hws[i] == NULL)
+                       continue;
+
+               hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+               if (hwif == NULL)
+                       continue;
+
+               idx = ide_find_port_slot(d);
+               if (idx < 0) {
+                       printk(KERN_ERR "%s: no free slot for interface\n",
+                                       d ? d->name : "ide");
+                       kfree(hwif);
+                       continue;
                }
+
+               ide_init_port_data(hwif, idx);
+
+               host->ports[i] = hwif;
+               host->n_ports++;
        }
 
-       printk(KERN_ERR "%s: no free slot for interface\n",
-                       d ? d->name : "ide");
+       if (host->n_ports == 0) {
+               kfree(host);
+               return NULL;
+       }
 
-       return NULL;
+       return host;
+}
+EXPORT_SYMBOL_GPL(ide_host_alloc_all);
+
+struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
+{
+       hw_regs_t *hws_all[MAX_HWIFS];
+       int i;
 
-out_found:
-       ide_init_port_data(hwif, i);
-       return hwif;
+       for (i = 0; i < MAX_HWIFS; i++)
+               hws_all[i] = (i < 4) ? hws[i] : NULL;
+
+       return ide_host_alloc_all(d, hws_all);
 }
-EXPORT_SYMBOL_GPL(ide_find_port_slot);
+EXPORT_SYMBOL_GPL(ide_host_alloc);
 
-int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
+int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
+                     hw_regs_t **hws)
 {
        ide_hwif_t *hwif, *mate = NULL;
-       int i, rc = 0;
+       int i, j = 0;
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               if (idx[i] == 0xff) {
+               hwif = host->ports[i];
+
+               if (hwif == NULL) {
                        mate = NULL;
                        continue;
                }
 
-               hwif = &ide_hwifs[idx[i]];
-
+               ide_init_port_hw(hwif, hws[i]);
                ide_port_apply_params(hwif);
 
                if (d == NULL) {
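
Editor's note: the allocator added above keeps occupied slots in the ide_indexes bitmask and hands out the lowest free index with ffz(); or-ing extra bits into the mask before the ffz() call steers a request away from reserved slots (bit 0 for IDE_HFLAG_QD_2ND_PORT second ports, bits 0-1 for non-bootable cards, which fall back to slots 0-1 only when everything else is taken). A userspace sketch of the same trick, assuming a 32-slot mask:

    #include <stdio.h>

    /* find first zero bit, as the kernel's ffz() does */
    static int ffz32(unsigned int mask)
    {
            int i;

            for (i = 0; i < 32; i++)
                    if (!(mask & (1u << i)))
                            return i;
            return -1;
    }

    int main(void)
    {
            unsigned int ide_indexes = 0;
            int idx;

            idx = ffz32(ide_indexes);       /* bootable port -> slot 0 */
            ide_indexes |= 1u << idx;

            idx = ffz32(ide_indexes | 3);   /* non-bootable: skip 0-1 -> slot 2 */
            ide_indexes |= 1u << idx;

            printf("mask: 0x%x\n", ide_indexes);    /* 0x5 */
            return 0;
    }
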
@@ -1534,10 +1655,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               if (idx[i] == 0xff)
-                       continue;
+               hwif = host->ports[i];
 
-               hwif = &ide_hwifs[idx[i]];
+               if (hwif == NULL)
+                       continue;
 
                if (ide_probe_port(hwif) == 0)
                        hwif->present = 1;
@@ -1551,19 +1672,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               if (idx[i] == 0xff)
-                       continue;
+               hwif = host->ports[i];
 
-               hwif = &ide_hwifs[idx[i]];
+               if (hwif == NULL)
+                       continue;
 
                if (hwif_init(hwif) == 0) {
                        printk(KERN_INFO "%s: failed to initialize IDE "
                                         "interface\n", hwif->name);
                        hwif->present = 0;
-                       rc = -1;
                        continue;
                }
 
+               j++;
+
                if (hwif->present)
                        ide_port_setup_devices(hwif);
 
@@ -1574,10 +1696,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               if (idx[i] == 0xff)
-                       continue;
+               hwif = host->ports[i];
 
-               hwif = &ide_hwifs[idx[i]];
+               if (hwif == NULL)
+                       continue;
 
                if (hwif->chipset == ide_unknown)
                        hwif->chipset = ide_generic;
@@ -1587,10 +1709,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               if (idx[i] == 0xff)
-                       continue;
+               hwif = host->ports[i];
 
-               hwif = &ide_hwifs[idx[i]];
+               if (hwif == NULL)
+                       continue;
 
                ide_sysfs_register_port(hwif);
                ide_proc_register_port(hwif);
@@ -1599,21 +1721,64 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
                        ide_proc_port_register_devices(hwif);
        }
 
-       return rc;
+       return j ? 0 : -1;
 }
-EXPORT_SYMBOL_GPL(ide_device_add_all);
+EXPORT_SYMBOL_GPL(ide_host_register);
 
-int ide_device_add(u8 idx[4], const struct ide_port_info *d)
+int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
+                struct ide_host **hostp)
 {
-       u8 idx_all[MAX_HWIFS];
+       struct ide_host *host;
+       int rc;
+
+       host = ide_host_alloc(d, hws);
+       if (host == NULL)
+               return -ENOMEM;
+
+       rc = ide_host_register(host, d, hws);
+       if (rc) {
+               ide_host_free(host);
+               return rc;
+       }
+
+       if (hostp)
+               *hostp = host;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ide_host_add);
+
+void ide_host_free(struct ide_host *host)
+{
+       ide_hwif_t *hwif;
        int i;
 
-       for (i = 0; i < MAX_HWIFS; i++)
-               idx_all[i] = (i < 4) ? idx[i] : 0xff;
+       for (i = 0; i < MAX_HWIFS; i++) {
+               hwif = host->ports[i];
 
-       return ide_device_add_all(idx_all, d);
+               if (hwif == NULL)
+                       continue;
+
+               ide_free_port_slot(hwif->index);
+               kfree(hwif);
+       }
+
+       kfree(host);
 }
-EXPORT_SYMBOL_GPL(ide_device_add);
+EXPORT_SYMBOL_GPL(ide_host_free);
+
+void ide_host_remove(struct ide_host *host)
+{
+       int i;
+
+       for (i = 0; i < MAX_HWIFS; i++) {
+               if (host->ports[i])
+                       ide_unregister(host->ports[i]);
+       }
+
+       ide_host_free(host);
+}
+EXPORT_SYMBOL_GPL(ide_host_remove);
 
 void ide_port_scan(ide_hwif_t *hwif)
 {
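
Editor's note: taken together, ide_host_alloc(), ide_host_register(), ide_host_free() and ide_host_remove() (with ide_host_add() as the convenience wrapper) replace the old ide_hwifs[]/idx[4] juggling with a conventional alloc/register/teardown lifecycle around struct ide_host. A hypothetical bus driver now follows the pattern sketched below (error paths trimmed, mydrv_* names illustrative); compare the idepnp_probe()/idepnp_remove() conversion earlier in this diff:

    static int mydrv_probe(struct device *dev, unsigned long base,
                           unsigned long ctl, int irq)
    {
            hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
            struct ide_host *host;
            int rc;

            memset(&hw, 0, sizeof(hw));
            ide_std_init_ports(&hw, base, ctl);
            hw.irq = irq;

            rc = ide_host_add(NULL, hws, &host);    /* NULL: no ide_port_info */
            if (rc)
                    return rc;

            dev_set_drvdata(dev, host);
            return 0;
    }

    static void mydrv_remove(struct device *dev)
    {
            /* unregisters all ports, frees the slots and the host */
            ide_host_remove(dev_get_drvdata(dev));
    }
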
@@ -1634,11 +1799,10 @@ void ide_port_scan(ide_hwif_t *hwif)
 }
 EXPORT_SYMBOL_GPL(ide_port_scan);
 
-static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
-                               const struct ide_port_info *d,
+static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
+                               u8 port_no, const struct ide_port_info *d,
                                unsigned long config)
 {
-       ide_hwif_t *hwif;
        unsigned long base, ctl;
        int irq;
 
@@ -1668,33 +1832,25 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
        ide_std_init_ports(hw, base, ctl);
        hw->irq = irq;
        hw->chipset = d->chipset;
+       hw->config = config;
 
-       hwif = ide_find_port_slot(d);
-       if (hwif) {
-               ide_init_port_hw(hwif, hw);
-               if (config)
-                       hwif->config_data = config;
-               idx[port_no] = hwif->index;
-       }
+       hws[port_no] = hw;
 }
 
 int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
 {
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
+       hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
 
        memset(&hw, 0, sizeof(hw));
 
        if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
-               ide_legacy_init_one(idx, &hw[0], 0, d, config);
-       ide_legacy_init_one(idx, &hw[1], 1, d, config);
+               ide_legacy_init_one(hws, &hw[0], 0, d, config);
+       ide_legacy_init_one(hws, &hw[1], 1, d, config);
 
-       if (idx[0] == 0xff && idx[1] == 0xff &&
+       if (hws[0] == NULL && hws[1] == NULL &&
            (d->host_flags & IDE_HFLAG_SINGLE))
                return -ENOENT;
 
-       ide_device_add(idx, d);
-
-       return 0;
+       return ide_host_add(d, hws, NULL);
 }
 EXPORT_SYMBOL_GPL(ide_legacy_device_add);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 8af88bf0969bde8bf41926e1e5834deed20022f6..151c91e933dab156adb2606f4a55384fc4866ad0 100644
@@ -345,7 +345,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
        ide_task_t task;
        int err;
 
-       if (arg < 0 || arg > 70)
+       if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
                return -EINVAL;
 
        memset(&task, 0, sizeof(task));
@@ -357,7 +357,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
 
        err = ide_no_data_taskfile(drive, &task);
 
-       if (!err && arg) {
+       if (!err) {
                ide_set_xfer_rate(drive, (u8) arg);
                ide_driveid_update(drive);
        }
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 353dd11b9283ed0ba312ad7e8dc6d8c56dbb28e6..6962ca4891a134517774fa9dbf7ec533ee938b5b 100644
@@ -195,23 +195,6 @@ enum {
 #define IDETAPE_BLOCK_DESCRIPTOR       0
 #define IDETAPE_CAPABILITIES_PAGE      0x2a
 
-/* Tape flag bits values. */
-enum {
-       IDETAPE_FLAG_IGNORE_DSC         = (1 << 0),
-       /* 0 When the tape position is unknown */
-       IDETAPE_FLAG_ADDRESS_VALID      = (1 << 1),
-       /* Device already opened */
-       IDETAPE_FLAG_BUSY               = (1 << 2),
-       /* Attempt to auto-detect the current user block size */
-       IDETAPE_FLAG_DETECT_BS          = (1 << 3),
-       /* Currently on a filemark */
-       IDETAPE_FLAG_FILEMARK           = (1 << 4),
-       /* DRQ interrupt device */
-       IDETAPE_FLAG_DRQ_INTERRUPT      = (1 << 5),
-       /* 0 = no tape is loaded, so we don't rewind after ejecting */
-       IDETAPE_FLAG_MEDIUM_PRESENT     = (1 << 6),
-};
-
 /*
  * Most of our global data which we need to save even as we leave the driver due
  * to an interrupt or a timer event is stored in the struct defined below.
@@ -312,8 +295,6 @@ typedef struct ide_tape_obj {
        /* Wasted space in each stage */
        int excess_bh_size;
 
-       /* Status/Action flags: long for set_bit */
-       unsigned long flags;
        /* protects the ide-tape queue */
        spinlock_t lock;
 
@@ -398,7 +379,7 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                count = min(
                        (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
                        bcount);
-               drive->hwif->input_data(drive, NULL, bh->b_data +
+               drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
                                        atomic_read(&bh->b_count), count);
                bcount -= count;
                atomic_add(count, &bh->b_count);
@@ -424,7 +405,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                        return;
                }
                count = min((unsigned int)pc->b_count, (unsigned int)bcount);
-               drive->hwif->output_data(drive, NULL, pc->b_data, count);
+               drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
                bcount -= count;
                pc->b_data += count;
                pc->b_count -= count;
@@ -585,7 +566,6 @@ static void ide_tape_kfree_buffer(idetape_tape_t *tape)
                bh = bh->b_reqnext;
                kfree(prev_bh);
        }
-       kfree(tape->merge_bh);
 }
 
 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
@@ -665,7 +645,7 @@ static void ide_tape_callback(ide_drive_t *drive)
                if (readpos[0] & 0x4) {
                        printk(KERN_INFO "ide-tape: Block location is unknown "
                                         "to the tape\n");
-                       clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
+                       clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
                        uptodate = 0;
                } else {
                        debug_log(DBG_SENSE, "Block Location - %u\n",
@@ -673,7 +653,7 @@ static void ide_tape_callback(ide_drive_t *drive)
 
                        tape->partition = readpos[1];
                        tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]);
-                       set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
+                       set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
                }
        }
 
@@ -690,7 +670,6 @@ static void idetape_init_pc(struct ide_atapi_pc *pc)
        pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
        pc->bh = NULL;
        pc->b_data = NULL;
-       pc->callback = ide_tape_callback;
 }
 
 static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -705,7 +684,7 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
 {
        blk_rq_init(NULL, rq);
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->cmd[0] = cmd;
+       rq->cmd[13] = cmd;
 }
 
 /*
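
Editor's note: the rq->cmd[0] to rq->cmd[13] moves throughout this file make room for the real packet bytes: rq->cmd[] is BLK_MAX_CDB (16) bytes, and the driver now memcpy()s the actual 12-byte ATAPI packet from pc->c into bytes 0-11, so the private REQ_IDETAPE_* request-type bits migrate to the spare byte 13. A sketch of the resulting layout, assuming BLK_MAX_CDB == 16 as in this kernel:

    /*
     * rq->cmd[] layout for ide-tape after this change:
     *
     *   cmd[0..11]   real ATAPI packet, copied from pc->c via memcpy()
     *   cmd[12]      spare
     *   cmd[13]      REQ_IDETAPE_{READ,WRITE,PC1,PC2} type bits
     *   cmd[14..15]  spare
     */
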
@@ -732,6 +711,7 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
        rq->cmd_flags |= REQ_PREEMPT;
        rq->buffer = (char *) pc;
        rq->rq_disk = tape->disk;
+       memcpy(rq->cmd, pc->c, 12);
        ide_do_drive_cmd(drive, rq);
 }
 
@@ -742,7 +722,6 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
  */
 static void idetape_retry_pc(ide_drive_t *drive)
 {
-       idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc;
        struct request *rq;
 
@@ -750,7 +729,7 @@ static void idetape_retry_pc(ide_drive_t *drive)
        pc = idetape_next_pc_storage(drive);
        rq = idetape_next_rq_storage(drive);
        idetape_create_request_sense_cmd(pc);
-       set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
+       set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
        idetape_queue_pc_head(drive, pc, rq);
 }
 
@@ -887,7 +866,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
                        pc->error = IDETAPE_ERROR_GENERAL;
                }
                tape->failed_pc = NULL;
-               pc->callback(drive);
+               drive->pc_callback(drive);
                return ide_stopped;
        }
        debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -927,11 +906,12 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
 
 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
 {
+       ide_hwif_t *hwif = drive->hwif;
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc = tape->pc;
        u8 stat;
 
-       stat = ide_read_status(drive);
+       stat = hwif->tp_ops->read_status(hwif);
 
        if (stat & SEEK_STAT) {
                if (stat & ERR_STAT) {
@@ -948,14 +928,17 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
                pc->error = IDETAPE_ERROR_GENERAL;
                tape->failed_pc = NULL;
        }
-       pc->callback(drive);
+       drive->pc_callback(drive);
        return ide_stopped;
 }
 
 static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
-               struct ide_atapi_pc *pc, unsigned int length,
-               struct idetape_bh *bh, u8 opcode)
+                                  struct ide_atapi_pc *pc, struct request *rq,
+                                  u8 opcode)
 {
+       struct idetape_bh *bh = (struct idetape_bh *)rq->special;
+       unsigned int length = rq->current_nr_sectors;
+
        idetape_init_pc(pc);
        put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
        pc->c[1] = 1;
@@ -975,11 +958,14 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
                pc->b_data = bh->b_data;
                pc->b_count = atomic_read(&bh->b_count);
        }
+
+       memcpy(rq->cmd, pc->c, 12);
 }
 
 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
                                          struct request *rq, sector_t block)
 {
+       ide_hwif_t *hwif = drive->hwif;
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc = NULL;
        struct request *postponed_rq = tape->postponed_rq;
@@ -1017,17 +1003,17 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
         * If the tape is still busy, postpone our request and service
         * the other device meanwhile.
         */
-       stat = ide_read_status(drive);
+       stat = hwif->tp_ops->read_status(hwif);
 
-       if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
-               set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
+       if (!drive->dsc_overlap && !(rq->cmd[13] & REQ_IDETAPE_PC2))
+               set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
 
        if (drive->post_reset == 1) {
-               set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
+               set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
                drive->post_reset = 0;
        }
 
-       if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
+       if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
            (stat & SEEK_STAT) == 0) {
                if (postponed_rq == NULL) {
                        tape->dsc_polling_start = jiffies;
@@ -1036,7 +1022,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
                } else if (time_after(jiffies, tape->dsc_timeout)) {
                        printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
                                tape->name);
-                       if (rq->cmd[0] & REQ_IDETAPE_PC2) {
+                       if (rq->cmd[13] & REQ_IDETAPE_PC2) {
                                idetape_media_access_finished(drive);
                                return ide_stopped;
                        } else {
@@ -1049,35 +1035,29 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
                idetape_postpone_request(drive);
                return ide_stopped;
        }
-       if (rq->cmd[0] & REQ_IDETAPE_READ) {
+       if (rq->cmd[13] & REQ_IDETAPE_READ) {
                pc = idetape_next_pc_storage(drive);
-               ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors,
-                                       (struct idetape_bh *)rq->special,
-                                       READ_6);
+               ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
                goto out;
        }
-       if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
+       if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
                pc = idetape_next_pc_storage(drive);
-               ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors,
-                                        (struct idetape_bh *)rq->special,
-                                        WRITE_6);
+               ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
                goto out;
        }
-       if (rq->cmd[0] & REQ_IDETAPE_PC1) {
+       if (rq->cmd[13] & REQ_IDETAPE_PC1) {
                pc = (struct ide_atapi_pc *) rq->buffer;
-               rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
-               rq->cmd[0] |= REQ_IDETAPE_PC2;
+               rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
+               rq->cmd[13] |= REQ_IDETAPE_PC2;
                goto out;
        }
-       if (rq->cmd[0] & REQ_IDETAPE_PC2) {
+       if (rq->cmd[13] & REQ_IDETAPE_PC2) {
                idetape_media_access_finished(drive);
                return ide_stopped;
        }
        BUG();
-out:
-       if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags))
-               pc->flags |= PC_FLAG_DRQ_INTERRUPT;
 
+out:
        return idetape_issue_pc(drive, pc);
 }
 
@@ -1281,8 +1261,9 @@ static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
 
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->cmd[0] = REQ_IDETAPE_PC1;
+       rq->cmd[13] = REQ_IDETAPE_PC1;
        rq->buffer = (char *)pc;
+       memcpy(rq->cmd, pc->c, 12);
        error = blk_execute_rq(drive->queue, tape->disk, rq, 0);
        blk_put_request(rq);
        return error;
@@ -1304,7 +1285,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
        int load_attempted = 0;
 
        /* Wait for the tape to become ready */
-       set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
+       set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
        timeout += jiffies;
        while (time_before(jiffies, timeout)) {
                idetape_create_test_unit_ready_cmd(&pc);
@@ -1397,7 +1378,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
        if (tape->chrdev_dir != IDETAPE_DIR_READ)
                return;
 
-       clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
+       clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
        tape->merge_bh_size = 0;
        if (tape->merge_bh != NULL) {
                ide_tape_kfree_buffer(tape);
@@ -1465,7 +1446,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
 
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->cmd[0] = cmd;
+       rq->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
        rq->special = (void *)bh;
        rq->sector = tape->first_frame;
@@ -1636,7 +1617,7 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
        debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
 
        /* If we are at a filemark, return a read length of 0 */
-       if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
+       if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
                return 0;
 
        idetape_init_read(drive);
@@ -1746,7 +1727,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
 
        if (tape->chrdev_dir == IDETAPE_DIR_READ) {
                tape->merge_bh_size = 0;
-               if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
+               if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
                        ++count;
                ide_tape_discard_merge_buffer(drive, 0);
        }
@@ -1801,7 +1782,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
        debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
 
        if (tape->chrdev_dir != IDETAPE_DIR_READ) {
-               if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
+               if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
                        if (count > tape->blk_size &&
                            (count % tape->blk_size) == 0)
                                tape->user_bs_factor = count / tape->blk_size;
@@ -1841,7 +1822,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
                tape->merge_bh_size = bytes_read-temp;
        }
 finish:
-       if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
+       if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
                debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
 
                idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -2027,7 +2008,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
                                              !IDETAPE_LU_LOAD_MASK);
                retval = idetape_queue_pc_tail(drive, &pc);
                if (!retval)
-                       clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
+                       clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
                return retval;
        case MTNOP:
                ide_tape_discard_merge_buffer(drive, 0);
@@ -2050,9 +2031,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
                            mt_count % tape->blk_size)
                                return -EIO;
                        tape->user_bs_factor = mt_count / tape->blk_size;
-                       clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
+                       clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
                } else
-                       set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
+                       set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
                return 0;
        case MTSEEK:
                ide_tape_discard_merge_buffer(drive, 0);
@@ -2202,20 +2183,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
 
        filp->private_data = tape;
 
-       if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
+       if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
                retval = -EBUSY;
                goto out_put_tape;
        }
 
        retval = idetape_wait_ready(drive, 60 * HZ);
        if (retval) {
-               clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
+               clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
                printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
                goto out_put_tape;
        }
 
        idetape_read_position(drive);
-       if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
+       if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
                (void)idetape_rewind_tape(drive);
 
        /* Read block size and write protect status from drive. */
@@ -2231,7 +2212,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
        if (tape->write_prot) {
                if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
                    (filp->f_flags & O_ACCMODE) == O_RDWR) {
-                       clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
+                       clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
                        retval = -EROFS;
                        goto out_put_tape;
                }
@@ -2291,7 +2272,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
                        ide_tape_discard_merge_buffer(drive, 1);
        }
 
-       if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
+       if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
                (void) idetape_rewind_tape(drive);
        if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
                if (tape->door_locked == DOOR_LOCKED) {
@@ -2301,7 +2282,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
                        }
                }
        }
-       clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
+       clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
        ide_tape_put(tape);
        unlock_kernel();
        return 0;
@@ -2464,6 +2445,8 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
        u8 gcw[2];
        u16 *ctl = (u16 *)&tape->caps[12];
 
+       drive->pc_callback = ide_tape_callback;
+
        spin_lock_init(&tape->lock);
        drive->dsc_overlap = 1;
        if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
@@ -2484,7 +2467,7 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
 
        /* Command packet DRQ type */
        if (((gcw[0] & 0x60) >> 5) == 1)
-               set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
+               set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
 
        idetape_get_inquiry_results(drive);
        idetape_get_mode_sense_results(drive);
index 1fbdb746dc88a761a7edf9a66d49e7f68e66b179..aeddbbd69e862e6526ee25d7a83e6143e1c5468f 100644
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
        ide_hwif_t *hwif        = HWIF(drive);
        struct ide_taskfile *tf = &task->tf;
        ide_handler_t *handler = NULL;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;
 
        if (task->data_phase == TASKFILE_MULTI_IN ||
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
 
        if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, tf);
-               ide_set_irq(drive, 1);
+               tp_ops->set_irq(hwif, 1);
                SELECT_MASK(drive, 0);
-               hwif->tf_load(drive, task);
+               tp_ops->tf_load(drive, task);
        }
 
        switch (task->data_phase) {
        case TASKFILE_MULTI_OUT:
        case TASKFILE_OUT:
-               hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr);
+               tp_ops->exec_command(hwif, tf->command);
                ndelay(400);    /* FIXME */
                return pre_task_out_intr(drive, task->rq);
        case TASKFILE_MULTI_IN:
@@ -124,7 +125,8 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
  */
 static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
 {
-       u8 stat = ide_read_status(drive);
+       ide_hwif_t *hwif = drive->hwif;
+       u8 stat = hwif->tp_ops->read_status(hwif);
 
        if (OK_STAT(stat, READY_STAT, BAD_STAT))
                drive->mult_count = drive->mult_req;
@@ -141,11 +143,16 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
  */
 static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
 {
+       ide_hwif_t *hwif = drive->hwif;
        int retries = 5;
        u8 stat;
 
-       while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
+       while (1) {
+               stat = hwif->tp_ops->read_status(hwif);
+               if ((stat & BUSY_STAT) == 0 || retries-- == 0)
+                       break;
                udelay(10);
+       };
 
        if (OK_STAT(stat, READY_STAT, BAD_STAT))
                return ide_stopped;
@@ -162,7 +169,8 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
  */
 static ide_startstop_t recal_intr(ide_drive_t *drive)
 {
-       u8 stat = ide_read_status(drive);
+       ide_hwif_t *hwif = drive->hwif;
+       u8 stat = hwif->tp_ops->read_status(hwif);
 
        if (!OK_STAT(stat, READY_STAT, BAD_STAT))
                return ide_error(drive, "recal_intr", stat);
@@ -174,11 +182,12 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
  */
 static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
 {
-       ide_task_t *args        = HWGROUP(drive)->rq->special;
+       ide_hwif_t *hwif = drive->hwif;
+       ide_task_t *args = hwif->hwgroup->rq->special;
        u8 stat;
 
        local_irq_enable_in_hardirq();
-       stat = ide_read_status(drive);
+       stat = hwif->tp_ops->read_status(hwif);
 
        if (!OK_STAT(stat, READY_STAT, BAD_STAT))
                return ide_error(drive, "task_no_data_intr", stat);
@@ -192,6 +201,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
 
 static u8 wait_drive_not_busy(ide_drive_t *drive)
 {
+       ide_hwif_t *hwif = drive->hwif;
        int retries;
        u8 stat;
 
@@ -200,7 +210,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
         */
        for (retries = 0; retries < 1000; retries++) {
-               stat = ide_read_status(drive);
+               stat = hwif->tp_ops->read_status(hwif);
 
                if (stat & BUSY_STAT)
                        udelay(10);
@@ -255,9 +265,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
 
        /* do the actual data transfer */
        if (write)
-               hwif->output_data(drive, rq, buf, SECTOR_SIZE);
+               hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
        else
-               hwif->input_data(drive, rq, buf, SECTOR_SIZE);
+               hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
 
        kunmap_atomic(buf, KM_BIO_SRC_IRQ);
 #ifdef CONFIG_HIGHMEM
@@ -383,8 +393,8 @@ static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq
 static ide_startstop_t task_in_intr(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
-       struct request *rq = HWGROUP(drive)->rq;
-       u8 stat = ide_read_status(drive);
+       struct request *rq = hwif->hwgroup->rq;
+       u8 stat = hwif->tp_ops->read_status(hwif);
 
        /* Error? */
        if (stat & ERR_STAT)
@@ -418,7 +428,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
-       u8 stat = ide_read_status(drive);
+       u8 stat = hwif->tp_ops->read_status(hwif);
 
        if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
                return task_error(drive, rq, __func__, stat);
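
The ide-taskfile.c hunks route every register access through the new const struct ide_tp_ops table reached as hwif->tp_ops, instead of per-hwif function pointers (hwif->tf_load, hwif->OUTBSYNC, hwif->input_data) or the ide_read_status() wrapper. A minimal sketch of the converted interrupt-handler pattern, using only operations that appear in the hunks:

    static ide_startstop_t example_intr(ide_drive_t *drive)
    {
            ide_hwif_t *hwif = drive->hwif;
            u8 stat = hwif->tp_ops->read_status(hwif); /* was ide_read_status(drive) */

            if (!OK_STAT(stat, READY_STAT, BAD_STAT))
                    return ide_error(drive, "example_intr", stat);

            return ide_stopped;
    }
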
index d4a6b102a77227d29b4052281704f4053700156d..60f0ca66aa93735d68f27478888cf01132457d47 100644
@@ -1,6 +1,6 @@
 /*
  *  Copyright (C) 1994-1998        Linus Torvalds & authors (see below)
- *  Copyrifht (C) 2003-2005, 2007   Bartlomiej Zolnierkiewicz
+ *  Copyright (C) 2003-2005, 2007   Bartlomiej Zolnierkiewicz
  */
 
 /*
@@ -101,8 +101,7 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
 
        init_completion(&hwif->gendev_rel_comp);
 
-       default_hwif_iops(hwif);
-       default_hwif_transport(hwif);
+       hwif->tp_ops = &default_tp_ops;
 
        ide_port_init_devices_data(hwif);
 }
@@ -134,41 +133,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
        }
 }
 
-void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
-{
-       ide_hwgroup_t *hwgroup = hwif->hwgroup;
-
-       spin_lock_irq(&ide_lock);
-       /*
-        * Remove us from the hwgroup, and free
-        * the hwgroup if we were the only member
-        */
-       if (hwif->next == hwif) {
-               BUG_ON(hwgroup->hwif != hwif);
-               kfree(hwgroup);
-       } else {
-               /* There is another interface in hwgroup.
-                * Unlink us, and set hwgroup->drive and ->hwif to
-                * something sane.
-                */
-               ide_hwif_t *g = hwgroup->hwif;
-
-               while (g->next != hwif)
-                       g = g->next;
-               g->next = hwif->next;
-               if (hwgroup->hwif == hwif) {
-                       /* Chose a random hwif for hwgroup->hwif.
-                        * It's guaranteed that there are no drives
-                        * left in the hwgroup.
-                        */
-                       BUG_ON(hwgroup->drive != NULL);
-                       hwgroup->hwif = g;
-               }
-               BUG_ON(hwgroup->hwif == hwif);
-       }
-       spin_unlock_irq(&ide_lock);
-}
-
 /* Called with ide_lock held. */
 static void __ide_port_unregister_devices(ide_hwif_t *hwif)
 {
@@ -269,16 +233,9 @@ void ide_unregister(ide_hwif_t *hwif)
        if (hwif->dma_base)
                ide_release_dma_engine(hwif);
 
-       spin_lock_irq(&ide_lock);
-       /* restore hwif data to pristine status */
-       ide_init_port_data(hwif, hwif->index);
-       spin_unlock_irq(&ide_lock);
-
        mutex_unlock(&ide_cfg_mtx);
 }
 
-EXPORT_SYMBOL(ide_unregister);
-
 void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
 {
        memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
@@ -287,8 +244,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
        hwif->dev = hw->dev;
        hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
        hwif->ack_intr = hw->ack_intr;
+       hwif->config_data = hw->config;
 }
-EXPORT_SYMBOL_GPL(ide_init_port_hw);
 
 /*
  *     Locks for IDE setting functionality
index 0497e7f85b09112c5fb39c1e5901e3d4231cb6f4..7c2afa97f41775e410835d539a174d14452f75ed 100644
@@ -37,6 +37,8 @@
 #define CATWEASEL_NUM_HWIFS    3
 #define XSURF_NUM_HWIFS         2
 
+#define MAX_NUM_HWIFS          3
+
     /*
      *  Bases of the IDE interfaces (relative to the board address)
      */
@@ -148,18 +150,14 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
 
 static int __init buddha_init(void)
 {
-       hw_regs_t hw;
-       ide_hwif_t *hwif;
-       int i;
-
        struct zorro_dev *z = NULL;
        u_long buddha_board = 0;
        BuddhaType type;
-       int buddha_num_hwifs;
+       int buddha_num_hwifs, i;
 
        while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
                unsigned long board;
-               u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+               hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
 
                if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
                        buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -221,19 +219,13 @@ fail_base2:
                                ack_intr = xsurf_ack_intr;
                        }
 
-                       buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
+                       buddha_setup_ports(&hw[i], base, ctl, irq_port,
+                                          ack_intr);
 
-                       hwif = ide_find_port();
-                       if (hwif) {
-                               u8 index = hwif->index;
-
-                               ide_init_port_hw(hwif, &hw);
-
-                               idx[i] = index;
-                       }
+                       hws[i] = &hw[i];
                }
 
-               ide_device_add(idx, NULL);
+               ide_host_add(NULL, hws, NULL);
        }
 
        return 0;
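
buddha.c is the first instance of the registration rework repeated through the rest of this diff: instead of looping over ide_find_port()/ide_init_port_hw() and collecting hwif indexes in a u8 idx[4] table for ide_device_add(), a driver fills an array of hw_regs_t, points the matching hws[] slots at the entries it actually probed, and makes one ide_host_add() call. A sketch of the shape, where board_setup_ports() is a hypothetical stand-in for helpers like buddha_setup_ports():

    static int __init example_init(void)
    {
            hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
            int i;

            for (i = 0; i < 2; i++) {
                    board_setup_ports(&hw[i], i);   /* hypothetical helper */
                    hws[i] = &hw[i];                /* NULL slots are skipped */
            }

            /* allocates a struct ide_host and registers every non-NULL port */
            return ide_host_add(NULL, hws, NULL);
    }
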
index 129a812bb57f5f124e4f3d7ef8f43b3356501c9f..724f95073d803df6ad53cfd23ebed54449c1402a 100644
@@ -66,6 +66,27 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq,
        outsw_swapw(data_addr, buf, (len + 1) / 2);
 }
 
+/* Atari has a byte-swapped IDE interface */
+static const struct ide_tp_ops falconide_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = ide_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = ide_set_irq,
+
+       .tf_load                = ide_tf_load,
+       .tf_read                = ide_tf_read,
+
+       .input_data             = falconide_input_data,
+       .output_data            = falconide_output_data,
+};
+
+static const struct ide_port_info falconide_port_info = {
+       .tp_ops                 = &falconide_tp_ops,
+       .host_flags             = IDE_HFLAG_NO_DMA,
+};
+
 static void __init falconide_setup_ports(hw_regs_t *hw)
 {
        int i;
@@ -91,11 +112,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
 
 static int __init falconide_init(void)
 {
-       hw_regs_t hw;
-       ide_hwif_t *hwif;
+       struct ide_host *host;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+       int rc;
 
        if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
-               return 0;
+               return -ENODEV;
 
        printk(KERN_INFO "ide: Falcon IDE controller\n");
 
@@ -106,23 +128,25 @@ static int __init falconide_init(void)
 
        falconide_setup_ports(&hw);
 
-       hwif = ide_find_port();
-       if (hwif) {
-               u8 index = hwif->index;
-               u8 idx[4] = { index, 0xff, 0xff, 0xff };
-
-               ide_init_port_hw(hwif, &hw);
+       host = ide_host_alloc(&falconide_port_info, hws);
+       if (host == NULL) {
+               rc = -ENOMEM;
+               goto err;
+       }
 
-               /* Atari has a byte-swapped IDE interface */
-               hwif->input_data  = falconide_input_data;
-               hwif->output_data = falconide_output_data;
+       ide_get_lock(NULL, NULL);
+       rc = ide_host_register(host, &falconide_port_info, hws);
+       ide_release_lock();
 
-               ide_get_lock(NULL, NULL);
-               ide_device_add(idx, NULL);
-               ide_release_lock();
-       }
+       if (rc)
+               goto err_free;
 
        return 0;
+err_free:
+       ide_host_free(host);
+err:
+       release_mem_region(ATA_HD_BASE, 0x40);
+       return rc;
 }
 
 module_init(falconide_init);
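
falconide needs its byte-swapped PIO routines in place before probing, so instead of patching hwif->input_data/output_data after the fact it supplies a complete ide_tp_ops table via ide_port_info, overriding only the two data-transfer hooks. It also splits bring-up into ide_host_alloc() plus ide_host_register() so that ide_get_lock()/ide_release_lock() can bracket just the registration step. A sketch of that flow, assuming hws[] is already set up:

    static int __init example_register(hw_regs_t **hws)
    {
            struct ide_host *host;
            int rc;

            host = ide_host_alloc(&falconide_port_info, hws);
            if (host == NULL)
                    return -ENOMEM;

            ide_get_lock(NULL, NULL);       /* serialize against the Falcon bus */
            rc = ide_host_register(host, &falconide_port_info, hws);
            ide_release_lock();

            if (rc)
                    ide_host_free(host);    /* allocated but never registered */

            return rc;
    }
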
index 7e74b20202dff34fc1edfe79c1503e85bb28a13d..dd5c467d8dd0582ac454bfd8796f2332a4a777b1 100644
@@ -31,6 +31,8 @@
 #define GAYLE_BASE_4000        0xdd2020        /* A4000/A4000T */
 #define GAYLE_BASE_1200        0xda0000        /* A1200/A600 and E-Matrix 530 */
 
+#define GAYLE_IDEREG_SIZE      0x2000
+
     /*
      *  Offsets from one of the above bases
      */
 #define GAYLE_NUM_HWIFS                1
 #define GAYLE_NUM_PROBE_HWIFS  GAYLE_NUM_HWIFS
 #define GAYLE_HAS_CONTROL_REG  1
-#define GAYLE_IDEREG_SIZE      0x2000
 #else /* CONFIG_BLK_DEV_IDEDOUBLER */
 #define GAYLE_NUM_HWIFS                2
 #define GAYLE_NUM_PROBE_HWIFS  (ide_doubler ? GAYLE_NUM_HWIFS : \
                                               GAYLE_NUM_HWIFS-1)
 #define GAYLE_HAS_CONTROL_REG  (!ide_doubler)
-#define GAYLE_IDEREG_SIZE      (ide_doubler ? 0x1000 : 0x2000)
 
 static int ide_doubler;
 module_param_named(doubler, ide_doubler, bool, 0);
@@ -124,8 +124,11 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
 
 static int __init gayle_init(void)
 {
+    unsigned long phys_base, res_start, res_n;
+    unsigned long base, ctrlport, irqport;
+    ide_ack_intr_t *ack_intr;
     int a4000, i;
-    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+    hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
 
     if (!MACH_IS_AMIGA)
        return -ENODEV;
@@ -148,13 +151,6 @@ found:
 #endif
                         "");
 
-    for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
-       unsigned long base, ctrlport, irqport;
-       ide_ack_intr_t *ack_intr;
-       hw_regs_t hw;
-       ide_hwif_t *hwif;
-       unsigned long phys_base, res_start, res_n;
-
        if (a4000) {
            phys_base = GAYLE_BASE_4000;
            irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
@@ -168,33 +164,22 @@ found:
  * FIXME: we now have selectable modes between mmio v/s iomio
  */
 
-       phys_base += i*GAYLE_NEXT_PORT;
-
        res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
        res_n = GAYLE_IDEREG_SIZE;
 
        if (!request_mem_region(res_start, res_n, "IDE"))
-           continue;
+               return -EBUSY;
 
-       base = (unsigned long)ZTWO_VADDR(phys_base);
+    for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
+       base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
        ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
 
-       gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
-
-       hwif = ide_find_port();
-       if (hwif) {
-           u8 index = hwif->index;
+       gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr);
 
-           ide_init_port_hw(hwif, &hw);
-
-           idx[i] = index;
-       } else
-           release_mem_region(res_start, res_n);
+       hws[i] = &hw[i];
     }
 
-    ide_device_add(idx, NULL);
-
-    return 0;
+    return ide_host_add(NULL, hws, NULL);
 }
 
 module_init(gayle_init);
index 89c8ff0a4d085f81e89bffba2fc3ba12a89f97c2..c76d55de6996413496ddc9c8d725f3feb633c860 100644
@@ -28,10 +28,8 @@ static const struct ide_port_info ide_4drives_port_info = {
 
 static int __init ide_4drives_init(void)
 {
-       ide_hwif_t *hwif, *mate;
        unsigned long base = 0x1f0, ctl = 0x3f6;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw;
+       hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
 
        if (probe_4drives == 0)
                return -ENODEV;
@@ -55,21 +53,7 @@ static int __init ide_4drives_init(void)
        hw.irq = 14;
        hw.chipset = ide_4drives;
 
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw);
-               idx[0] = hwif->index;
-       }
-
-       mate = ide_find_port();
-       if (mate) {
-               ide_init_port_hw(mate, &hw);
-               idx[1] = mate->index;
-       }
-
-       ide_device_add(idx, &ide_4drives_port_info);
-
-       return 0;
+       return ide_host_add(&ide_4drives_port_info, hws, NULL);
 }
 
 module_init(ide_4drives_init);
index 27b1e0b7ecb49b8b68e18248a1b53b80982f83ce..21bfac137844f689afbeb9d094a7e0f773625e3f 100644
@@ -74,7 +74,7 @@ INT_MODULE_PARM(pc_debug, 0);
 
 typedef struct ide_info_t {
        struct pcmcia_device    *p_dev;
-       ide_hwif_t              *hwif;
+       struct ide_host         *host;
     int                ndev;
     dev_node_t node;
 } ide_info_t;
@@ -132,7 +132,7 @@ static int ide_probe(struct pcmcia_device *link)
 static void ide_detach(struct pcmcia_device *link)
 {
     ide_info_t *info = link->priv;
-    ide_hwif_t *hwif = info->hwif;
+    ide_hwif_t *hwif = info->host->ports[0];
     unsigned long data_addr, ctl_addr;
 
     DEBUG(0, "ide_detach(0x%p)\n", link);
@@ -157,13 +157,13 @@ static const struct ide_port_info idecs_port_info = {
        .host_flags             = IDE_HFLAG_NO_DMA,
 };
 
-static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
+static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
                                unsigned long irq, struct pcmcia_device *handle)
 {
+    struct ide_host *host;
     ide_hwif_t *hwif;
-    hw_regs_t hw;
-    int i;
-    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+    int i, rc;
+    hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
     if (!request_region(io, 8, DRV_NAME)) {
        printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -184,30 +184,24 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
     hw.chipset = ide_pci;
     hw.dev = &handle->dev;
 
-    hwif = ide_find_port();
-    if (hwif == NULL)
+    rc = ide_host_add(&idecs_port_info, hws, &host);
+    if (rc)
        goto out_release;
 
-    i = hwif->index;
-
-    ide_init_port_hw(hwif, &hw);
-
-    idx[0] = i;
-
-    ide_device_add(idx, &idecs_port_info);
+    hwif = host->ports[0];
 
     if (hwif->present)
-       return hwif;
+       return host;
 
     /* retry registration in case device is still spinning up */
     for (i = 0; i < 10; i++) {
        msleep(100);
        ide_port_scan(hwif);
        if (hwif->present)
-           return hwif;
+           return host;
     }
 
-    return hwif;
+    return host;
 
 out_release:
     release_region(ctl, 1);
@@ -239,7 +233,7 @@ static int ide_config(struct pcmcia_device *link)
     cistpl_cftable_entry_t *cfg;
     int pass, last_ret = 0, last_fn = 0, is_kme = 0;
     unsigned long io_base, ctl_base;
-    ide_hwif_t *hwif;
+    struct ide_host *host;
 
     DEBUG(0, "ide_config(0x%p)\n", link);
 
@@ -334,21 +328,21 @@ static int ide_config(struct pcmcia_device *link)
     if (is_kme)
        outb(0x81, ctl_base+1);
 
-     hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
-     if (hwif == NULL && link->io.NumPorts1 == 0x20) {
+     host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+     if (host == NULL && link->io.NumPorts1 == 0x20) {
            outb(0x02, ctl_base + 0x10);
-           hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
+           host = idecs_register(io_base + 0x10, ctl_base + 0x10,
                                  link->irq.AssignedIRQ, link);
     }
 
-    if (hwif == NULL)
+    if (host == NULL)
        goto failed;
 
     info->ndev = 1;
-    sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);
-    info->node.major = hwif->major;
+    sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
+    info->node.major = host->ports[0]->major;
     info->node.minor = 0;
-    info->hwif = hwif;
+    info->host = host;
     link->dev_node = &info->node;
     printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
           info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -379,15 +373,15 @@ failed:
 static void ide_release(struct pcmcia_device *link)
 {
     ide_info_t *info = link->priv;
-    ide_hwif_t *hwif = info->hwif;
+    struct ide_host *host = info->host;
 
     DEBUG(0, "ide_release(0x%p)\n", link);
 
-    if (info->ndev) {
+    if (info->ndev)
        /* FIXME: if this fails we need to queue the cleanup somehow
           -- need to investigate the required PCMCIA magic */
-       ide_unregister(hwif);
-    }
+       ide_host_remove(host);
+
     info->ndev = 0;
 
     pcmcia_disable_device(link);
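
ide-cs is typical of how hotplug drivers adapt to the new object: the private data keeps a struct ide_host instead of an ide_hwif_t, single ports stay reachable as host->ports[0], and teardown goes through ide_host_remove() now that ide_unregister()'s export is dropped in the ide.c hunk above. A minimal sketch of the release side, reusing the ide_info_t from this file:

    static void example_release(ide_info_t *info)
    {
            struct ide_host *host = info->host;

            /* unregisters every port and frees the host in one call */
            ide_host_remove(host);
            info->ndev = 0;
    }
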
index a249562b34b52b1ddfb8401136dd5b9c062fd0f6..051b4ab0f359d033745e0a035235a656dea7be4e 100644
@@ -52,12 +52,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
 {
        struct resource *res_base, *res_alt, *res_irq;
        void __iomem *base, *alt_base;
-       ide_hwif_t *hwif;
        struct pata_platform_info *pdata;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       int ret = 0;
-       int mmio = 0;
-       hw_regs_t hw;
+       struct ide_host *host;
+       int ret = 0, mmio = 0;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        struct ide_port_info d = platform_ide_port_info;
 
        pdata = pdev->dev.platform_data;
@@ -94,28 +92,18 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
                        res_alt->start, res_alt->end - res_alt->start + 1);
        }
 
-       hwif = ide_find_port();
-       if (!hwif) {
-               ret = -ENODEV;
-               goto out;
-       }
-
        memset(&hw, 0, sizeof(hw));
        plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
        hw.dev = &pdev->dev;
 
-       ide_init_port_hw(hwif, &hw);
-
-       if (mmio) {
+       if (mmio)
                d.host_flags |= IDE_HFLAG_MMIO;
-               default_hwif_mmiops(hwif);
-       }
 
-       idx[0] = hwif->index;
-
-       ide_device_add(idx, &d);
+       ret = ide_host_add(&d, hws, &host);
+       if (ret)
+               goto out;
 
-       platform_set_drvdata(pdev, hwif);
+       platform_set_drvdata(pdev, host);
 
        return 0;
 
@@ -125,9 +113,9 @@ out:
 
 static int __devexit plat_ide_remove(struct platform_device *pdev)
 {
-       ide_hwif_t *hwif = pdev->dev.driver_data;
+       struct ide_host *host = pdev->dev.driver_data;
 
-       ide_unregister(hwif);
+       ide_host_remove(host);
 
        return 0;
 }
index 0a6195bcfeda40681da03a0e53dfe3d769a802be..a0bb167980e745fee8be4274edc0579101de22c3 100644
@@ -91,11 +91,10 @@ static const char *mac_ide_name[] =
 
 static int __init macide_init(void)
 {
-       ide_hwif_t *hwif;
        ide_ack_intr_t *ack_intr;
        unsigned long base;
        int irq;
-       hw_regs_t hw;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        if (!MACH_IS_MAC)
                return -ENODEV;
@@ -125,17 +124,7 @@ static int __init macide_init(void)
 
        macide_setup_ports(&hw, base, irq, ack_intr);
 
-       hwif = ide_find_port();
-       if (hwif) {
-               u8 index = hwif->index;
-               u8 idx[4] = { index, 0xff, 0xff, 0xff };
-
-               ide_init_port_hw(hwif, &hw);
-
-               ide_device_add(idx, NULL);
-       }
-
-       return 0;
+       return ide_host_add(NULL, hws, NULL);
 }
 
 module_init(macide_init);
index 9c2b9d078f69e5bc993e5d9fc88a0b0f03d9669d..4abd8fc781979df7dea762bd6ece387a7f436509 100644
@@ -96,6 +96,27 @@ static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
        outsw_swapw(data_addr, buf, (len + 1) / 2);
 }
 
+/* Q40 has a byte-swapped IDE interface */
+static const struct ide_tp_ops q40ide_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = ide_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = ide_set_irq,
+
+       .tf_load                = ide_tf_load,
+       .tf_read                = ide_tf_read,
+
+       .input_data             = q40ide_input_data,
+       .output_data            = q40ide_output_data,
+};
+
+static const struct ide_port_info q40ide_port_info = {
+       .tp_ops                 = &q40ide_tp_ops,
+       .host_flags             = IDE_HFLAG_NO_DMA,
+};
+
 /* 
  * the static array is needed to have the name reported in /proc/ioports,
  * hwif->name unfortunately isn't available yet
@@ -111,9 +132,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
 static int __init q40ide_init(void)
 {
     int i;
-    ide_hwif_t *hwif;
-    const char *name;
-    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+    hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
 
     if (!MACH_IS_Q40)
       return -ENODEV;
@@ -121,9 +140,8 @@ static int __init q40ide_init(void)
     printk(KERN_INFO "ide: Q40 IDE controller\n");
 
     for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
-       hw_regs_t hw;
+       const char *name = q40_ide_names[i];
 
-       name = q40_ide_names[i];
        if (!request_region(pcide_bases[i], 8, name)) {
                printk("could not reserve ports %lx-%lx for %s\n",
                       pcide_bases[i],pcide_bases[i]+8,name);
@@ -135,26 +153,13 @@ static int __init q40ide_init(void)
                release_region(pcide_bases[i], 8);
                continue;
        }
-       q40_ide_setup_ports(&hw, pcide_bases[i],
-                       NULL,
-//                     m68kide_iops,
+       q40_ide_setup_ports(&hw[i], pcide_bases[i], NULL,
                        q40ide_default_irq(pcide_bases[i]));
 
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw);
-
-               /* Q40 has a byte-swapped IDE interface */
-               hwif->input_data  = q40ide_input_data;
-               hwif->output_data = q40ide_output_data;
-
-               idx[i] = hwif->index;
-       }
+       hws[i] = &hw[i];
     }
 
-    ide_device_add(idx, NULL);
-
-    return 0;
+    return ide_host_add(&q40ide_port_info, hws, NULL);
 }
 
 module_init(q40ide_init);
index 48d57cae63c69c267f42d08339e8ec42d13fa85e..11b7f61aae40f7a7d7ec8d29be48cd6c5ec78313 100644
@@ -519,6 +519,23 @@ static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
        *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
 }
 
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+static const struct ide_tp_ops au1xxx_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = ide_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = ide_set_irq,
+
+       .tf_load                = ide_tf_load,
+       .tf_read                = ide_tf_read,
+
+       .input_data             = au1xxx_input_data,
+       .output_data            = au1xxx_output_data,
+};
+#endif
+
 static const struct ide_port_ops au1xxx_port_ops = {
        .set_pio_mode           = au1xxx_set_pio_mode,
        .set_dma_mode           = auide_set_dma_mode,
@@ -526,6 +543,9 @@ static const struct ide_port_ops au1xxx_port_ops = {
 
 static const struct ide_port_info au1xxx_port_info = {
        .init_dma               = auide_ddma_init,
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+       .tp_ops                 = &au1xxx_tp_ops,
+#endif
        .port_ops               = &au1xxx_port_ops,
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
        .dma_ops                = &au1xxx_dma_ops,
@@ -543,11 +563,10 @@ static int au_ide_probe(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        _auide_hwif *ahwif = &auide_hwif;
-       ide_hwif_t *hwif;
        struct resource *res;
+       struct ide_host *host;
        int ret = 0;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
        char *mode = "MWDMA2";
@@ -584,36 +603,19 @@ static int au_ide_probe(struct device *dev)
                goto out;
        }
 
-       hwif = ide_find_port();
-       if (hwif == NULL) {
-               ret = -ENOENT;
-               goto out;
-       }
-
        memset(&hw, 0, sizeof(hw));
        auide_setup_ports(&hw, ahwif);
        hw.irq = ahwif->irq;
        hw.dev = dev;
        hw.chipset = ide_au1xxx;
 
-       ide_init_port_hw(hwif, &hw);
-
-       /* If the user has selected DDMA assisted copies,
-          then set up a few local I/O function entry points 
-       */
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA     
-       hwif->input_data  = au1xxx_input_data;
-       hwif->output_data = au1xxx_output_data;
-#endif
-
-       auide_hwif.hwif                 = hwif;
-
-       idx[0] = hwif->index;
+       ret = ide_host_add(&au1xxx_port_info, hws, &host);
+       if (ret)
+               goto out;
 
-       ide_device_add(idx, &au1xxx_port_info);
+       auide_hwif.hwif = host->ports[0];
 
-       dev_set_drvdata(dev, hwif);
+       dev_set_drvdata(dev, host);
 
        printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
 
@@ -625,10 +627,10 @@ static int au_ide_remove(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;
-       ide_hwif_t *hwif = dev_get_drvdata(dev);
+       struct ide_host *host = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;
 
-       ide_unregister(hwif);
+       ide_host_remove(host);
 
        iounmap((void *)ahwif->regbase);
 
index 9f1212cc4aed3e33152a1ff0c32eebf62ee0f987..badf79fc9e3a1c67c3eef99bbbce4cb9c31eb355 100644
@@ -72,12 +72,11 @@ static const struct ide_port_info swarm_port_info = {
  */
 static int __devinit swarm_ide_probe(struct device *dev)
 {
-       ide_hwif_t *hwif;
        u8 __iomem *base;
+       struct ide_host *host;
        phys_t offset, size;
-       hw_regs_t hw;
-       int i;
-       u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
+       int i, rc;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        if (!SIBYTE_HAVE_IDE)
                return -ENODEV;
@@ -116,26 +115,17 @@ static int __devinit swarm_ide_probe(struct device *dev)
        hw.irq = K_INT_GB_IDE;
        hw.chipset = ide_generic;
 
-       hwif = ide_find_port_slot(&swarm_port_info);
-       if (hwif == NULL)
+       rc = ide_host_add(&swarm_port_info, hws, &host);
+       if (rc)
                goto err;
 
-       ide_init_port_hw(hwif, &hw);
-
-       /* Setup MMIO ops. */
-       default_hwif_mmiops(hwif);
-
-       idx[0] = hwif->index;
-
-       ide_device_add(idx, &swarm_port_info);
-
-       dev_set_drvdata(dev, hwif);
+       dev_set_drvdata(dev, host);
 
        return 0;
 err:
        release_resource(&swarm_ide_resource);
        iounmap(base);
-       return -ENOMEM;
+       return rc;
 }
 
 static struct device_driver swarm_ide_driver = {
index ae7a4329a581065dc10f1f18c83b1d5a7daedb1d..fbc43e121e6b19856f4ceb528c299d5dd682656e 100644
@@ -195,7 +195,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
                .host_flags     = IDE_HFLAG_SERIALIZE |
                                  IDE_HFLAG_NO_ATAPI_DMA |
                                  IDE_HFLAG_NO_DSC |
-                                 IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -205,7 +204,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
                .init_chipset   = init_chipset_aec62xx,
                .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
-                                 IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -216,7 +214,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
                .enablebits     = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
                .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
-                                 IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_NON_BOOTABLE,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -226,7 +223,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
                .init_chipset   = init_chipset_aec62xx,
                .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
-                                 IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -237,7 +233,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
                .enablebits     = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
                .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
-                                 IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
index 80d19c0eb78097d09cc822cb0ed27273563ce1cf..5ef7817ac64f852e5042f45a281ea243bcf4a802 100644
@@ -471,7 +471,15 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long base = ide_pci_dma_base(hwif, d);
 
-       if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+       if (base == 0)
+               return -1;
+
+       hwif->dma_base = base;
+
+       if (ide_pci_check_simplex(hwif, d) < 0)
+               return -1;
+
+       if (ide_pci_set_master(dev, d->name) < 0)
                return -1;
 
        if (!hwif->channel)
@@ -483,7 +491,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
        if (ide_allocate_dma_engine(hwif))
                return -1;
 
-       ide_setup_dma(hwif, base);
+       hwif->dma_ops = &sff_dma_ops;
 
        return 0;
 }
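
init_dma_ali15x3() now follows the same skeleton as init_dma_hpt366() further down: the hook stores dma_base on the hwif itself, runs the new ide_pci_check_simplex() before claiming PCI bus mastering, and finishes by installing the generic sff_dma_ops table where it previously called the removed ide_setup_dma(). The common shape, minus the chip-specific register pokes:

    static int __devinit example_init_dma(ide_hwif_t *hwif,
                                          const struct ide_port_info *d)
    {
            struct pci_dev *dev = to_pci_dev(hwif->dev);
            unsigned long base = ide_pci_dma_base(hwif, d);

            if (base == 0)
                    return -1;

            hwif->dma_base = base;  /* must be set before the simplex check */

            if (ide_pci_check_simplex(hwif, d) < 0)
                    return -1;

            if (ide_pci_set_master(dev, d->name) < 0)
                    return -1;

            /* ... chip-specific setup goes here ... */

            if (ide_allocate_dma_engine(hwif))
                    return -1;

            hwif->dma_ops = &sff_dma_ops;   /* replaces ide_setup_dma() */

            return 0;
    }
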
index 0bfcdd0e77b318a94691c69da0f7a6bdb8d261a6..ef7d971031eee6bad1aef03c49aedec4e6369c78 100644
@@ -218,7 +218,6 @@ static const struct ide_port_ops amd_port_ops = {
 
 #define IDE_HFLAGS_AMD \
        (IDE_HFLAG_PIO_NO_BLACKLIST | \
-        IDE_HFLAG_ABUSE_SET_DMA_MODE | \
         IDE_HFLAG_POST_SET_MODE | \
         IDE_HFLAG_IO_32BIT | \
         IDE_HFLAG_UNMASK_IRQS)
index 1ad1e23e310577c2875994fe421e969d6c61ab55..e6c62006ca1a48a2bdc2c460562f452aab58d347 100644
@@ -180,11 +180,6 @@ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
 
 static DEFINE_SPINLOCK(cmd640_lock);
 
-/*
- * These are initialized to point at the devices we control
- */
-static ide_hwif_t  *cmd_hwif0, *cmd_hwif1;
-
 /*
  * Interface to access cmd640x registers
  */
@@ -717,8 +712,7 @@ static int __init cmd640x_init(void)
        int second_port_cmd640 = 0, rc;
        const char *bus_type, *port2;
        u8 b, cfr;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
+       hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
 
        if (cmd640_vlb && probe_for_cmd640_vlb()) {
                bus_type = "VLB";
@@ -781,15 +775,10 @@ static int __init cmd640x_init(void)
        printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
                         "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
 
-       cmd_hwif0 = ide_find_port();
-
        /*
         * Initialize data for primary port
         */
-       if (cmd_hwif0) {
-               ide_init_port_hw(cmd_hwif0, &hw[0]);
-               idx[0] = cmd_hwif0->index;
-       }
+       hws[0] = &hw[0];
 
        /*
         * Ensure compatibility by always using the slowest timings
@@ -829,13 +818,9 @@ static int __init cmd640x_init(void)
        /*
         * Initialize data for secondary cmd640 port, if enabled
         */
-       if (second_port_cmd640) {
-               cmd_hwif1 = ide_find_port();
-               if (cmd_hwif1) {
-                       ide_init_port_hw(cmd_hwif1, &hw[1]);
-                       idx[1] = cmd_hwif1->index;
-               }
-       }
+       if (second_port_cmd640)
+               hws[1] = &hw[1];
+
        printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
                         second_port_cmd640 ? "" : "not ", port2);
 
@@ -843,9 +828,7 @@ static int __init cmd640x_init(void)
        cmd640_dump_regs();
 #endif
 
-       ide_device_add(idx, &cmd640_port_info);
-
-       return 1;
+       return ide_host_add(&cmd640_port_info, hws, NULL);
 }
 
 module_param_named(probe_vlb, cmd640_vlb, bool, 0);
index cfa784bacf48678f712e169775a4f10293a89cc0..ce58bfcdb3c6c645c51b70321d799dfa0157524b 100644
@@ -262,7 +262,7 @@ static int cmd648_dma_test_irq(ide_drive_t *drive)
        unsigned long base      = hwif->dma_base - (hwif->channel * 8);
        u8 irq_mask             = hwif->channel ? MRDMODE_INTR_CH1 :
                                                  MRDMODE_INTR_CH0;
-       u8 dma_stat             = inb(hwif->dma_status);
+       u8 dma_stat             = inb(hwif->dma_base + ATA_DMA_STATUS);
        u8 mrdmode              = inb(base + 1);
 
 #ifdef DEBUG
@@ -286,7 +286,7 @@ static int cmd64x_dma_test_irq(ide_drive_t *drive)
        int irq_reg             = hwif->channel ? ARTTIM23 : CFR;
        u8  irq_mask            = hwif->channel ? ARTTIM23_INTR_CH1 :
                                                  CFR_INTR_CH0;
-       u8  dma_stat            = inb(hwif->dma_status);
+       u8  dma_stat            = inb(hwif->dma_base + ATA_DMA_STATUS);
        u8  irq_stat            = 0;
 
        (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
@@ -317,13 +317,13 @@ static int cmd646_1_dma_end(ide_drive_t *drive)
 
        drive->waiting_for_dma = 0;
        /* get DMA status */
-       dma_stat = inb(hwif->dma_status);
+       dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
        /* read DMA command state */
-       dma_cmd = inb(hwif->dma_command);
+       dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
        /* stop DMA */
-       outb(dma_cmd & ~1, hwif->dma_command);
+       outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
        /* clear the INTR & ERROR bits */
-       outb(dma_stat | 6, hwif->dma_status);
+       outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
        /* and free any DMA resources */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
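
The cmd64x hunks show the other conversion that recurs below (hpt366, ns87415, pdc202xx_old, piix): the cached hwif->dma_status and hwif->dma_command port numbers are gone, and the SFF-8038i bus-master registers are computed as fixed offsets from hwif->dma_base. The idiom, as a sketch:

    static void example_stop_dma(ide_hwif_t *hwif)
    {
            u8 dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);    /* was hwif->dma_command */
            u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); /* was hwif->dma_status  */

            outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);    /* stop the engine      */
            outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); /* clear INTR and ERROR */
    }
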
index 992b1cf8db6976be67833338a6932ba19593e4aa..b03d8ae947e6fc10c6682d8944cae1dea4f5f4f9 100644
@@ -62,8 +62,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
        struct pci_dev *pdev = to_pci_dev(hwif->dev);
        int controller = drive->dn > 1 ? 1 : 0;
 
-       /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
-
        /* 8bit CAT/CRT - 8bit command timing for channel */
        pci_write_config_byte(pdev, 0x62 + controller, 
                (cs5520_pio_clocks[pio].recovery << 4) |
@@ -89,46 +87,17 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
        cs5520_set_pio_mode(drive, 0);
 }
 
-/*
- *     We wrap the DMA activate to set the vdma flag. This is needed
- *     so that the IDE DMA layer issues PIO not DMA commands over the
- *     DMA channel
- *
- *     ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
- */
-
-static void cs5520_dma_host_set(ide_drive_t *drive, int on)
-{
-       drive->vdma = on;
-       ide_dma_host_set(drive, on);
-}
-
 static const struct ide_port_ops cs5520_port_ops = {
        .set_pio_mode           = cs5520_set_pio_mode,
        .set_dma_mode           = cs5520_set_dma_mode,
 };
 
-static const struct ide_dma_ops cs5520_dma_ops = {
-       .dma_host_set           = cs5520_dma_host_set,
-       .dma_setup              = ide_dma_setup,
-       .dma_exec_cmd           = ide_dma_exec_cmd,
-       .dma_start              = ide_dma_start,
-       .dma_end                = __ide_dma_end,
-       .dma_test_irq           = ide_dma_test_irq,
-       .dma_lost_irq           = ide_dma_lost_irq,
-       .dma_timeout            = ide_dma_timeout,
-};
-
-/* FIXME: VDMA is disabled because it caused system hangs */
 #define DECLARE_CS_DEV(name_str)                               \
        {                                                       \
                .name           = name_str,                     \
                .port_ops       = &cs5520_port_ops,             \
-               .dma_ops        = &cs5520_dma_ops,              \
                .host_flags     = IDE_HFLAG_ISA_PORTS |         \
-                                 IDE_HFLAG_CS5520 |            \
-                                 IDE_HFLAG_NO_ATAPI_DMA |      \
-                                 IDE_HFLAG_ABUSE_SET_DMA_MODE, \
+                                 IDE_HFLAG_CS5520,             \
                .pio_mask       = ATA_PIO4,                     \
        }
 
@@ -146,7 +115,7 @@ static const struct ide_port_info cyrix_chipsets[] __devinitdata = {
 static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
        const struct ide_port_info *d = &cyrix_chipsets[id->driver_data];
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
 
        ide_setup_pci_noise(dev, d);
 
@@ -168,11 +137,9 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
         *      do all the device setup for us
         */
 
-       ide_pci_setup_ports(dev, d, 14, &idx[0]);
-
-       ide_device_add(idx, d);
+       ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);
 
-       return 0;
+       return ide_host_add(d, hws, NULL);
 }
 
 static const struct pci_device_id cs5520_pci_tbl[] = {
index dc97c48623f3fc438fb2237d259d55eb2046d985..5404fe4f701d79cc4bc651c99548ce35da9d4c67 100644
@@ -171,8 +171,7 @@ static const struct ide_port_ops cs5535_port_ops = {
 static const struct ide_port_info cs5535_chipset __devinitdata = {
        .name           = "CS5535",
        .port_ops       = &cs5535_port_ops,
-       .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
-                         IDE_HFLAG_ABUSE_SET_DMA_MODE,
+       .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
        .pio_mask       = ATA_PIO4,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA4,
index 0106e2a2df77a3ab8c42fd9092e440c3a309a9b9..f84bfb4f600f1a0ba3473c4ab7fb8275dc7093db 100644
@@ -56,11 +56,10 @@ static const struct ide_port_info delkin_cb_port_info = {
 static int __devinit
 delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
 {
+       struct ide_host *host;
        unsigned long base;
-       hw_regs_t hw;
-       ide_hwif_t *hwif = NULL;
        int i, rc;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
 
        rc = pci_enable_device(dev);
        if (rc) {
@@ -87,34 +86,26 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
        hw.dev = &dev->dev;
        hw.chipset = ide_pci;           /* this enables IRQ sharing */
 
-       hwif = ide_find_port();
-       if (hwif == NULL)
+       rc = ide_host_add(&delkin_cb_port_info, hws, &host);
+       if (rc)
                goto out_disable;
 
-       i = hwif->index;
-
-       ide_init_port_hw(hwif, &hw);
-
-       idx[0] = i;
-
-       ide_device_add(idx, &delkin_cb_port_info);
-
-       pci_set_drvdata(dev, hwif);
+       pci_set_drvdata(dev, host);
 
        return 0;
 
 out_disable:
        pci_release_regions(dev);
        pci_disable_device(dev);
-       return -ENODEV;
+       return rc;
 }
 
 static void
 delkin_cb_remove (struct pci_dev *dev)
 {
-       ide_hwif_t *hwif = pci_get_drvdata(dev);
+       struct ide_host *host = pci_get_drvdata(dev);
 
-       ide_unregister(hwif);
+       ide_host_remove(host);
 
        pci_release_regions(dev);
        pci_disable_device(dev);
index 84c36c117194cb731e24e2ea60ee7b67e2df5681..9e1d1c4741da5567509c28a671ea9e82aa410139 100644
@@ -123,7 +123,6 @@ static const struct ide_port_ops hpt34x_port_ops = {
 #define IDE_HFLAGS_HPT34X \
        (IDE_HFLAG_NO_ATAPI_DMA | \
         IDE_HFLAG_NO_DSC | \
-        IDE_HFLAG_ABUSE_SET_DMA_MODE | \
         IDE_HFLAG_NO_AUTODMA)
 
 static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
index 397c6cbe953c0e42fc8a7636b9eca36377a429b6..1f1135ce7cd6616f9626c062013168e6d5b3af9c 100644 (file)
@@ -801,9 +801,9 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
        printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
 
        /* get DMA command mode */
-       dma_cmd = inb(hwif->dma_command);
+       dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
        /* stop DMA */
-       outb(dma_cmd & ~0x1, hwif->dma_command);
+       outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD);
        hpt370_clear_engine(drive);
 }
 
@@ -818,12 +818,12 @@ static void hpt370_dma_start(ide_drive_t *drive)
 static int hpt370_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
-       u8  dma_stat            = inb(hwif->dma_status);
+       u8  dma_stat            = inb(hwif->dma_base + ATA_DMA_STATUS);
 
        if (dma_stat & 0x01) {
                /* wait a little */
                udelay(20);
-               dma_stat = inb(hwif->dma_status);
+               dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
                if (dma_stat & 0x01)
                        hpt370_irq_timeout(drive);
        }
@@ -850,7 +850,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive)
                return 0;
        }
 
-       dma_stat = inb(hwif->dma_status);
+       dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
        /* return 1 if INTR asserted */
        if (dma_stat & 4)
                return 1;
@@ -1320,7 +1320,15 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
        unsigned long flags, base = ide_pci_dma_base(hwif, d);
        u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
 
-       if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+       if (base == 0)
+               return -1;
+
+       hwif->dma_base = base;
+
+       if (ide_pci_check_simplex(hwif, d) < 0)
+               return -1;
+
+       if (ide_pci_set_master(dev, d->name) < 0)
                return -1;
 
        dma_old = inb(base + 2);
@@ -1346,7 +1354,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
        if (ide_allocate_dma_engine(hwif))
                return -1;
 
-       ide_setup_dma(hwif, base);
+       hwif->dma_ops = &sff_dma_ops;
 
        return 0;
 }
@@ -1401,7 +1409,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
 
 #define IDE_HFLAGS_HPT3XX \
        (IDE_HFLAG_NO_ATAPI_DMA | \
-        IDE_HFLAG_ABUSE_SET_DMA_MODE | \
         IDE_HFLAG_OFF_BOARD)
 
 static const struct ide_port_ops hpt3xx_port_ops = {
index 45ba71a7182fdf87a4d988a0c4eee66cb224c6d4..5cd2b32ff0ef000d61a06047bddb19f9a911c342 100644
  */
 #include <asm/superio.h>
 
-static unsigned long superio_ide_status[2];
-static unsigned long superio_ide_select[2];
-static unsigned long superio_ide_dma_status[2];
-
 #define SUPERIO_IDE_MAX_RETRIES 25
 
 /* Because of a defect in Super I/O, all reads of the PCI DMA status 
@@ -40,27 +36,28 @@ static unsigned long superio_ide_dma_status[2];
  */
 static u8 superio_ide_inb (unsigned long port)
 {
-       if (port == superio_ide_status[0] ||
-           port == superio_ide_status[1] ||
-           port == superio_ide_select[0] ||
-           port == superio_ide_select[1] ||
-           port == superio_ide_dma_status[0] ||
-           port == superio_ide_dma_status[1]) {
-               u8 tmp;
-               int retries = SUPERIO_IDE_MAX_RETRIES;
+       u8 tmp;
+       int retries = SUPERIO_IDE_MAX_RETRIES;
 
-               /* printk(" [ reading port 0x%x with retry ] ", port); */
+       /* printk(" [ reading port 0x%x with retry ] ", port); */
 
-               do {
-                       tmp = inb(port);
-                       if (tmp == 0)
-                               udelay(50);
-               } while (tmp == 0 && retries-- > 0);
+       do {
+               tmp = inb(port);
+               if (tmp == 0)
+                       udelay(50);
+       } while (tmp == 0 && retries-- > 0);
 
-               return tmp;
-       }
+       return tmp;
+}
 
-       return inb(port);
+static u8 superio_read_status(ide_hwif_t *hwif)
+{
+       return superio_ide_inb(hwif->io_ports.status_addr);
+}
+
+static u8 superio_read_sff_dma_status(ide_hwif_t *hwif)
+{
+       return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
 }
 
 static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
@@ -78,6 +75,8 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
        /* be sure we're looking at the low order bits */
        outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
 
+       if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+               tf->feature = inb(io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_IN_NSECT)
                tf->nsect  = inb(io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -105,36 +104,32 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
        }
 }
 
-static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
-{
-       struct pci_dev *pdev = to_pci_dev(hwif->dev);
-       u32 base, dmabase;
-       u8 port = hwif->channel, tmp;
+static const struct ide_tp_ops superio_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = superio_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = superio_read_sff_dma_status,
 
-       base = pci_resource_start(pdev, port * 2) & ~3;
-       dmabase = pci_resource_start(pdev, 4) & ~3;
-
-       superio_ide_status[port] = base + 7;
-       superio_ide_select[port] = base + 6;
-       superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
-
-       /* Clear error/interrupt, enable dma */
-       tmp = superio_ide_inb(superio_ide_dma_status[port]);
-       outb(tmp | 0x66, superio_ide_dma_status[port]);
+       .set_irq                = ide_set_irq,
 
-       hwif->tf_read = superio_tf_read;
+       .tf_load                = ide_tf_load,
+       .tf_read                = superio_tf_read,
 
-       /* We need to override inb to workaround a SuperIO errata */
-       hwif->INB = superio_ide_inb;
-}
+       .input_data             = ide_input_data,
+       .output_data            = ide_output_data,
+};
 
-static void __devinit init_iops_ns87415(ide_hwif_t *hwif)
+static void __devinit superio_init_iops(struct hwif_s *hwif)
 {
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
+       struct pci_dev *pdev = to_pci_dev(hwif->dev);
+       u32 dma_stat;
+       u8 port = hwif->channel, tmp;
 
-       if (PCI_SLOT(dev->devfn) == 0xE)
-               /* Built-in - assume it's under superio. */
-               superio_ide_init_iops(hwif);
+       dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
+
+       /* Clear error/interrupt, enable dma */
+       tmp = superio_ide_inb(dma_stat);
+       outb(tmp | 0x66, dma_stat);
 }
 #endif
 
@@ -200,14 +195,14 @@ static int ns87415_dma_end(ide_drive_t *drive)
        u8 dma_stat = 0, dma_cmd = 0;
 
        drive->waiting_for_dma = 0;
-       dma_stat = hwif->INB(hwif->dma_status);
-       /* get dma command mode */
-       dma_cmd = hwif->INB(hwif->dma_command);
+       dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
+       /* get DMA command mode */
+       dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
        /* stop DMA */
-       outb(dma_cmd & ~1, hwif->dma_command);
+       outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
        /* from ERRATA: clear the INTR & ERROR bits */
-       dma_cmd = hwif->INB(hwif->dma_command);
-       outb(dma_cmd | 6, hwif->dma_command);
+       dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
+       outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
        /* and free any DMA resources */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
@@ -276,7 +271,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
                outb(8, hwif->io_ports.ctl_addr);
                do {
                        udelay(50);
-                       stat = hwif->INB(hwif->io_ports.status_addr);
+                       stat = hwif->tp_ops->read_status(hwif);
                        if (stat == 0xff)
                                break;
                } while ((stat & BUSY_STAT) && --timeout);
@@ -291,7 +286,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
        if (!hwif->dma_base)
                return;
 
-       outb(0x60, hwif->dma_status);
+       outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
 }
 
 static const struct ide_port_ops ns87415_port_ops = {
@@ -311,9 +306,6 @@ static const struct ide_dma_ops ns87415_dma_ops = {
 
 static const struct ide_port_info ns87415_chipset __devinitdata = {
        .name           = "NS87415",
-#ifdef CONFIG_SUPERIO
-       .init_iops      = init_iops_ns87415,
-#endif
        .init_hwif      = init_hwif_ns87415,
        .port_ops       = &ns87415_port_ops,
        .dma_ops        = &ns87415_dma_ops,
@@ -323,7 +315,16 @@ static const struct ide_port_info ns87415_chipset __devinitdata = {
 
 static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
-       return ide_setup_pci_device(dev, &ns87415_chipset);
+       struct ide_port_info d = ns87415_chipset;
+
+#ifdef CONFIG_SUPERIO
+       if (PCI_SLOT(dev->devfn) == 0xE) {
+               /* Built-in - assume it's under superio. */
+               d.init_iops = superio_init_iops;
+               d.tp_ops = &superio_tp_ops;
+       }
+#endif
+       return ide_setup_pci_device(dev, &d);
 }
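
Worth noting: ns87415_chipset is const __devinitdata, so the probe takes a
stack copy and patches only the SuperIO hooks before handing it to the core.
A minimal sketch of the same per-device-quirk pattern (names hypothetical):

        struct ide_port_info d = some_chipset_template; /* copy the const template */

        if (device_needs_quirk(dev))                    /* hypothetical predicate */
                d.tp_ops = &quirk_tp_ops;               /* override only what differs */

        return ide_setup_pci_device(dev, &d);
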
 
 static const struct pci_device_id ns87415_pci_tbl[] = {
index fca89eda5c022cf8538eeaa0e89dd9344223d6ec..e54dc653b8c4aa609edbb6d16a0ab85e0bfa034b 100644 (file)
@@ -206,7 +206,7 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        unsigned long high_16   = hwif->extra_base - 16;
-       u8 dma_stat             = inb(hwif->dma_status);
+       u8 dma_stat             = inb(hwif->dma_base + ATA_DMA_STATUS);
        u8 sc1d                 = inb(high_16 + 0x001d);
 
        if (hwif->channel) {
@@ -312,7 +312,6 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
 
 #define IDE_HFLAGS_PDC202XX \
        (IDE_HFLAG_ERROR_STOPS_FIFO | \
-        IDE_HFLAG_ABUSE_SET_DMA_MODE | \
         IDE_HFLAG_OFF_BOARD)
 
 static const struct ide_port_ops pdc20246_port_ops = {
index f04738d14a6f93b8499222dedef55ac582f4961c..0ce41b4dddafc543855f7c491c8fcf6599b666d2 100644 (file)
@@ -227,9 +227,9 @@ static void piix_dma_clear_irq(ide_drive_t *drive)
        u8 dma_stat;
 
        /* clear the INTR & ERROR bits */
-       dma_stat = inb(hwif->dma_status);
+       dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
        /* Should we force the bit as well ? */
-       outb(dma_stat, hwif->dma_status);
+       outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
 }
 
 struct ich_laptop {
index 789c66dfbde5a424f841cb8e5874a1e887f6959f..94a7ab86423616e7889c4bc4a2fa63f1f1d5b7b5 100644 (file)
@@ -65,7 +65,7 @@
 
 static struct scc_ports {
        unsigned long ctl, dma;
-       ide_hwif_t *hwif;  /* for removing port from system */
+       struct ide_host *host;  /* for removing port from system */
 } scc_ports[MAX_HWIFS];
 
 /* PIO transfer mode  table */
@@ -126,6 +126,46 @@ static u8 scc_ide_inb(unsigned long port)
        return (u8)data;
 }
 
+static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+       out_be32((void *)hwif->io_ports.command_addr, cmd);
+       eieio();
+       in_be32((void *)(hwif->dma_base + 0x01c));
+       eieio();
+}
+
+static u8 scc_read_status(ide_hwif_t *hwif)
+{
+       return (u8)in_be32((void *)hwif->io_ports.status_addr);
+}
+
+static u8 scc_read_altstatus(ide_hwif_t *hwif)
+{
+       return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
+}
+
+static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
+{
+       return (u8)in_be32((void *)(hwif->dma_base + 4));
+}
+
+static void scc_set_irq(ide_hwif_t *hwif, int on)
+{
+       u8 ctl = ATA_DEVCTL_OBS;
+
+       if (on == 4) { /* hack for SRST */
+               ctl |= 4;
+               on &= ~4;
+       }
+
+       ctl |= on ? 0 : 2;
+
+       out_be32((void *)hwif->io_ports.ctl_addr, ctl);
+       eieio();
+       in_be32((void *)(hwif->dma_base + 0x01c));
+       eieio();
+}
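
For reference, scc_set_irq() drives the ATA device control register: bit 1
(nIEN) masks the drive interrupt when the port is switched off, and bit 2 is
SRST, smuggled through the same hook by the on == 4 convention the patch
carries over. The trailing in_be32()/eieio() pair flushes the posted write,
taking over from the scc_ide_outbsync() removed below. Decoded:

        /* illustrative decoding of set_irq(hwif, on):
         *   on == 1 -> ctl = ATA_DEVCTL_OBS       IRQ enabled
         *   on == 0 -> ctl = ATA_DEVCTL_OBS | 2   nIEN set, IRQ masked
         *   on == 4 -> ctl = ATA_DEVCTL_OBS | 4   SRST asserted (reset hack)
         */
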
+
 static void scc_ide_insw(unsigned long port, void *addr, u32 count)
 {
        u16 *ptr = (u16 *)addr;
@@ -148,14 +188,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
        out_be32((void*)port, addr);
 }
 
-static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
-{
-       out_be32((void*)port, addr);
-       eieio();
-       in_be32((void*)(hwif->dma_base + 0x01c));
-       eieio();
-}
-
 static void
 scc_ide_outsw(unsigned long port, void *addr, u32 count)
 {
@@ -261,14 +293,14 @@ static void scc_dma_host_set(ide_drive_t *drive, int on)
 {
        ide_hwif_t *hwif = drive->hwif;
        u8 unit = (drive->select.b.unit & 0x01);
-       u8 dma_stat = scc_ide_inb(hwif->dma_status);
+       u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
 
        if (on)
                dma_stat |= (1 << (5 + unit));
        else
                dma_stat &= ~(1 << (5 + unit));
 
-       scc_ide_outb(dma_stat, hwif->dma_status);
+       scc_ide_outb(dma_stat, hwif->dma_base + 4);
 }
 
 /**
@@ -304,13 +336,13 @@ static int scc_dma_setup(ide_drive_t *drive)
        out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
 
        /* specify r/w */
-       out_be32((void __iomem *)hwif->dma_command, reading);
+       out_be32((void __iomem *)hwif->dma_base, reading);
 
-       /* read dma_status for INTR & ERROR flags */
-       dma_stat = in_be32((void __iomem *)hwif->dma_status);
+       /* read DMA status for INTR & ERROR flags */
+       dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
 
        /* clear INTR & ERROR flags */
-       out_be32((void __iomem *)hwif->dma_status, dma_stat|6);
+       out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
        drive->waiting_for_dma = 1;
        return 0;
 }
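
The setup above is the usual SFF-8038i programming order, only through
big-endian MMIO and at SCC's own register offsets (PRD table at
dma_base + 8 rather than the generic + 4). On a plain port-I/O BMDMA
engine the equivalent sequence would be, roughly:

        outl(hwif->dmatable_dma, dma_base + 4);         /* PRD table pointer */
        outb(reading ? 8 : 0, dma_base + ATA_DMA_CMD);  /* direction, engine stopped */
        /* clear stale INTR/ERROR before the engine is started */
        outb(inb(dma_base + ATA_DMA_STATUS) | 6, dma_base + ATA_DMA_STATUS);
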
@@ -318,10 +350,10 @@ static int scc_dma_setup(ide_drive_t *drive)
 static void scc_dma_start(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
-       u8 dma_cmd = scc_ide_inb(hwif->dma_command);
+       u8 dma_cmd = scc_ide_inb(hwif->dma_base);
 
        /* start DMA */
-       scc_ide_outb(dma_cmd | 1, hwif->dma_command);
+       scc_ide_outb(dma_cmd | 1, hwif->dma_base);
        hwif->dma = 1;
        wmb();
 }
@@ -333,13 +365,13 @@ static int __scc_dma_end(ide_drive_t *drive)
 
        drive->waiting_for_dma = 0;
        /* get DMA command mode */
-       dma_cmd = scc_ide_inb(hwif->dma_command);
+       dma_cmd = scc_ide_inb(hwif->dma_base);
        /* stop DMA */
-       scc_ide_outb(dma_cmd & ~1, hwif->dma_command);
+       scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
        /* get DMA status */
-       dma_stat = scc_ide_inb(hwif->dma_status);
+       dma_stat = scc_ide_inb(hwif->dma_base + 4);
        /* clear the INTR & ERROR bits */
-       scc_ide_outb(dma_stat | 6, hwif->dma_status);
+       scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
        /* purge DMA mappings */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
@@ -359,6 +391,7 @@ static int __scc_dma_end(ide_drive_t *drive)
 static int scc_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
+       void __iomem *dma_base = (void __iomem *)hwif->dma_base;
        unsigned long intsts_port = hwif->dma_base + 0x014;
        u32 reg;
        int dma_stat, data_loss = 0;
@@ -397,7 +430,7 @@ static int scc_dma_end(ide_drive_t *drive)
                        printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
 
-                       out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
                        continue;
                }
 
@@ -412,7 +445,7 @@ static int scc_dma_end(ide_drive_t *drive)
 
                        out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
 
-                       out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
                        continue;
                }
 
@@ -420,12 +453,12 @@ static int scc_dma_end(ide_drive_t *drive)
                        printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
 
-                       out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
                        continue;
                }
 
                if (reg & INTSTS_ICERR) {
-                       out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
 
                        printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
                        out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
@@ -553,14 +586,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
                                    const struct ide_port_info *d)
 {
        struct scc_ports *ports = pci_get_drvdata(dev);
-       ide_hwif_t *hwif = NULL;
-       hw_regs_t hw;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       int i;
-
-       hwif = ide_find_port_slot(d);
-       if (hwif == NULL)
-               return -ENOMEM;
+       struct ide_host *host;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+       int i, rc;
 
        memset(&hw, 0, sizeof(hw));
        for (i = 0; i <= 8; i++)
@@ -568,11 +596,12 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
        hw.irq = dev->irq;
        hw.dev = &dev->dev;
        hw.chipset = ide_pci;
-       ide_init_port_hw(hwif, &hw);
 
-       idx[0] = hwif->index;
+       rc = ide_host_add(d, hws, &host);
+       if (rc)
+               return rc;
 
-       ide_device_add(idx, d);
+       ports->host = host;
 
        return 0;
 }
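
This hunk is the shape of the whole series: instead of grabbing a
pre-allocated slot with ide_find_port_slot() and registering through an
idx[] table, the driver fills a hw_regs_t, publishes it in a four-slot
pointer array, and lets ide_host_add() allocate and register the host in
one call. A minimal sketch using only the signatures visible in this patch:

        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        struct ide_host *host;
        int rc;

        memset(&hw, 0, sizeof(hw));
        /* ... fill hw.io_ports, hw.irq, hw.dev, hw.chipset ... */

        rc = ide_host_add(d, hws, &host);       /* allocate + register */
        if (rc)
                return rc;
        /* host->ports[0] is the resulting ide_hwif_t */
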
@@ -701,6 +730,8 @@ static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
        /* be sure we're looking at the low order bits */
        scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
 
+       if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+               tf->feature = scc_ide_inb(io_ports->feature_addr);
        if (task->tf_flags & IDE_TFLAG_IN_NSECT)
                tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
        if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -774,16 +805,6 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
 
        ide_set_hwifdata(hwif, ports);
 
-       hwif->tf_load = scc_tf_load;
-       hwif->tf_read = scc_tf_read;
-
-       hwif->input_data  = scc_input_data;
-       hwif->output_data = scc_output_data;
-
-       hwif->INB = scc_ide_inb;
-       hwif->OUTB = scc_ide_outb;
-       hwif->OUTBSYNC = scc_ide_outbsync;
-
        hwif->dma_base = dma_base;
        hwif->config_data = ports->ctl;
 }
@@ -824,11 +845,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
 {
        struct scc_ports *ports = ide_get_hwifdata(hwif);
 
-       ports->hwif = hwif;
-
-       hwif->dma_command = hwif->dma_base;
-       hwif->dma_status = hwif->dma_base + 0x04;
-
        /* PTERADD */
        out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
 
@@ -838,6 +854,21 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
                hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
 }
 
+static const struct ide_tp_ops scc_tp_ops = {
+       .exec_command           = scc_exec_command,
+       .read_status            = scc_read_status,
+       .read_altstatus         = scc_read_altstatus,
+       .read_sff_dma_status    = scc_read_sff_dma_status,
+
+       .set_irq                = scc_set_irq,
+
+       .tf_load                = scc_tf_load,
+       .tf_read                = scc_tf_read,
+
+       .input_data             = scc_input_data,
+       .output_data            = scc_output_data,
+};
+
 static const struct ide_port_ops scc_port_ops = {
        .set_pio_mode           = scc_set_pio_mode,
        .set_dma_mode           = scc_set_dma_mode,
@@ -861,6 +892,7 @@ static const struct ide_dma_ops scc_dma_ops = {
       .name            = name_str,                     \
       .init_iops       = init_iops_scc,                \
       .init_hwif       = init_hwif_scc,                \
+      .tp_ops          = &scc_tp_ops,                  \
       .port_ops        = &scc_port_ops,                \
       .dma_ops         = &scc_dma_ops,                 \
       .host_flags      = IDE_HFLAG_SINGLE,             \
@@ -895,7 +927,8 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
 static void __devexit scc_remove(struct pci_dev *dev)
 {
        struct scc_ports *ports = pci_get_drvdata(dev);
-       ide_hwif_t *hwif = ports->hwif;
+       struct ide_host *host = ports->host;
+       ide_hwif_t *hwif = host->ports[0];
 
        if (hwif->dmatable_cpu) {
                pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -903,7 +936,7 @@ static void __devexit scc_remove(struct pci_dev *dev)
                hwif->dmatable_cpu = NULL;
        }
 
-       ide_unregister(hwif);
+       ide_host_remove(host);
 
        iounmap((void*)ports->dma);
        iounmap((void*)ports->ctl);
index a1fb20826a5b817244d500db92c921a054c9ddee..127ccb45e261c23cb799bcb1e886141155f4b779 100644 (file)
@@ -349,9 +349,7 @@ static const struct ide_port_ops svwks_port_ops = {
        .cable_detect           = svwks_cable_detect,
 };
 
-#define IDE_HFLAGS_SVWKS \
-       (IDE_HFLAG_LEGACY_IRQS | \
-        IDE_HFLAG_ABUSE_SET_DMA_MODE)
+#define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS
 
 static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
        {       /* 0 */
index c79ff5b41088e40d09a124de94785cf1959408c1..42eef19a18f14f7c46611ffb2a7073e8d8cce6d4 100644 (file)
@@ -127,7 +127,7 @@ sgiioc4_checkirq(ide_hwif_t * hwif)
        return 0;
 }
 
-static u8 sgiioc4_INB(unsigned long);
+static u8 sgiioc4_read_status(ide_hwif_t *);
 
 static int
 sgiioc4_clearirq(ide_drive_t * drive)
@@ -141,18 +141,19 @@ sgiioc4_clearirq(ide_drive_t * drive)
        intr_reg = readl((void __iomem *)other_ir);
        if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
                /*
-                * Using sgiioc4_INB to read the Status register has a side
-                * effect of clearing the interrupt.  The first read should
+                * Using sgiioc4_read_status to read the Status register has a
+                * side effect of clearing the interrupt.  The first read should
                 * clear it if it is set.  The second read should return
                 * a "clear" status if it got cleared.  If not, then spin
                 * for a bit trying to clear it.
                 */
-               u8 stat = sgiioc4_INB(io_ports->status_addr);
+               u8 stat = sgiioc4_read_status(hwif);
                int count = 0;
-               stat = sgiioc4_INB(io_ports->status_addr);
+
+               stat = sgiioc4_read_status(hwif);
                while ((stat & 0x80) && (count++ < 100)) {
                        udelay(1);
-                       stat = sgiioc4_INB(io_ports->status_addr);
+                       stat = sgiioc4_read_status(hwif);
                }
 
                if (intr_reg & 0x02) {
@@ -304,9 +305,9 @@ sgiioc4_dma_lost_irq(ide_drive_t * drive)
        ide_dma_lost_irq(drive);
 }
 
-static u8
-sgiioc4_INB(unsigned long port)
+static u8 sgiioc4_read_status(ide_hwif_t *hwif)
 {
+       unsigned long port = hwif->io_ports.status_addr;
        u8 reg = (u8) readb((void __iomem *) port);
 
        if ((port & 0xFFF) == 0x11C) {  /* Status register of IOC4 */
@@ -549,6 +550,21 @@ static int sgiioc4_dma_setup(ide_drive_t *drive)
        return 0;
 }
 
+static const struct ide_tp_ops sgiioc4_tp_ops = {
+       .exec_command           = ide_exec_command,
+       .read_status            = sgiioc4_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = ide_set_irq,
+
+       .tf_load                = ide_tf_load,
+       .tf_read                = ide_tf_read,
+
+       .input_data             = ide_input_data,
+       .output_data            = ide_output_data,
+};
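
As in the other converted drivers, the table mixes library defaults
(ide_exec_command, ide_tf_load, ...) with the single accessor this chip
must override, and core code now dispatches through the per-port table
instead of the old global hwif->INB hook -- illustratively:

        u8 stat = hwif->tp_ops->read_status(hwif);      /* sgiioc4_read_status here */
        u8 alt  = hwif->tp_ops->read_altstatus(hwif);   /* library ide_read_altstatus */
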
+
 static const struct ide_port_ops sgiioc4_port_ops = {
        .set_dma_mode           = sgiioc4_set_dma_mode,
        /* reset DMA engine, clear IRQs */
@@ -571,6 +587,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
        .name                   = DRV_NAME,
        .chipset                = ide_pci,
        .init_dma               = ide_dma_sgiioc4,
+       .tp_ops                 = &sgiioc4_tp_ops,
        .port_ops               = &sgiioc4_port_ops,
        .dma_ops                = &sgiioc4_dma_ops,
        .host_flags             = IDE_HFLAG_MMIO,
@@ -583,10 +600,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
        unsigned long cmd_base, irqport;
        unsigned long bar0, cmd_phys_base, ctl;
        void __iomem *virt_base;
-       ide_hwif_t *hwif;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw;
+       struct ide_host *host;
+       hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        struct ide_port_info d = sgiioc4_port_info;
+       int rc;
 
        /*  Get the CmdBlk and CtrlBlk Base Registers */
        bar0 = pci_resource_start(dev, 0);
@@ -618,30 +635,26 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
        hw.chipset = ide_pci;
        hw.dev = &dev->dev;
 
-       hwif = ide_find_port_slot(&d);
-       if (hwif == NULL)
-               goto err;
-
-       ide_init_port_hw(hwif, &hw);
-
-       /* The IOC4 uses MMIO rather than Port IO. */
-       default_hwif_mmiops(hwif);
-
        /* Initializing chipset IRQ Registers */
        writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
 
-       hwif->INB = &sgiioc4_INB;
-
-       idx[0] = hwif->index;
+       host = ide_host_alloc(&d, hws);
+       if (host == NULL) {
+               rc = -ENOMEM;
+               goto err;
+       }
 
-       if (ide_device_add(idx, &d))
-               return -EIO;
+       rc = ide_host_register(host, &d, hws);
+       if (rc)
+               goto err_free;
 
        return 0;
+err_free:
+       ide_host_free(host);
 err:
        release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
        iounmap(virt_base);
-       return -ENOMEM;
+       return rc;
 }
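
Unlike the one-shot ide_host_add() used elsewhere in the series, sgiioc4
splits allocation from registration so a registration failure can unwind
through ide_host_free() while reusing the driver's existing err path
(release_mem_region/iounmap). In brief:

        host = ide_host_alloc(&d, hws);         /* NULL on allocation failure */
        if (host == NULL)
                goto err;                       /* shared unwind path */
        if (ide_host_register(host, &d, hws)) {
                ide_host_free(host);            /* undo only the allocation */
                goto err;
        }
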
 
 static unsigned int __devinit
index 6e9d7655d89c02d1f6e426cd681428981fa66200..5965a35d94ae75a7c64c57fb0def8e5aed27ef0f 100644 (file)
@@ -334,7 +334,7 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive)
        unsigned long addr      = siimage_selreg(hwif, 1);
 
        /* return 1 if INTR asserted */
-       if (hwif->INB(hwif->dma_status) & 4)
+       if (inb(hwif->dma_base + ATA_DMA_STATUS) & 4)
                return 1;
 
        /* return 1 if Device INTR asserted */
@@ -382,7 +382,7 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
        }
 
        /* return 1 if INTR asserted */
-       if (readb((void __iomem *)hwif->dma_status) & 0x04)
+       if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
                return 1;
 
        /* return 1 if Device INTR asserted */
@@ -601,7 +601,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
         *      Fill in the basic hwif bits
         */
        hwif->host_flags |= IDE_HFLAG_MMIO;
-       default_hwif_mmiops(hwif);
+
        hwif->hwif_data = addr;
 
        /*
index 6efbde297174b9b32a8405b227e9acf6abb69e77..f82a6502c1b721069bc1cadbeff89028dc97fc77 100644 (file)
@@ -157,9 +157,9 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
         * Was DMA enabled?  If so, disable it - we're resetting the
         * host.  The IDE layer will be handling the drive for us.
         */
-       dma_cmd = inb(hwif->dma_command);
+       dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
        if (dma_cmd & 1) {
-               outb(dma_cmd & ~1, hwif->dma_command);
+               outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
                printk("sl82c105: DMA was enabled\n");
        }
 
index 9b4b27a4c71121c30298052a30c653f5ccffee05..477e1979010230d4f1662af093b535520d9ccd95 100644 (file)
@@ -63,7 +63,7 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
        ide_hwif_t *hwif        = HWIF(drive);
        ide_expiry_t *expiry    = ide_get_hwifdata(hwif);
        ide_hwgroup_t *hwgroup  = HWGROUP(drive);
-       u8 dma_stat             = inb(hwif->dma_status);
+       u8 dma_stat             = inb(hwif->dma_base + ATA_DMA_STATUS);
 
        /* Restore a higher level driver's expiry handler first. */
        hwgroup->expiry = expiry;
@@ -71,21 +71,24 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
        if ((dma_stat & 5) == 1) {      /* DMA active and no interrupt */
                unsigned long sc_base   = hwif->config_data;
                unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
-               u8 dma_cmd              = inb(hwif->dma_command);
+               u8 dma_cmd              = inb(hwif->dma_base + ATA_DMA_CMD);
 
                printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
                       "attempting recovery...\n", drive->name);
 
                /* Stop DMA */
-               outb(dma_cmd & ~0x01, hwif->dma_command);
+               outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
 
                /* Setup the dummy DMA transfer */
                outw(0, sc_base + 0x0a);        /* Sector Count */
                outw(0, twcr_port);     /* Transfer Word Count 1 or 2 */
 
                /* Start the dummy DMA transfer */
-               outb(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */
-               outb(0x01, hwif->dma_command); /* set START_STOPBM */
+
+               /* clear R_OR_WCTR for write */
+               outb(0x00, hwif->dma_base + ATA_DMA_CMD);
+               /* set START_STOPBM */
+               outb(0x01, hwif->dma_base + ATA_DMA_CMD);
 
                /*
                 * If an interrupt was pending, it should come thru shortly.
@@ -203,8 +206,7 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
        .init_hwif      = init_hwif_tc86c001,
        .port_ops       = &tc86c001_port_ops,
        .dma_ops        = &tc86c001_dma_ops,
-       .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
-                         IDE_HFLAG_ABUSE_SET_DMA_MODE,
+       .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
        .pio_mask       = ATA_PIO4,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA4,
index e47384c70c40bee808d79ef4fc2f127418bb8e47..09dc4803ef9d282c528240dee980f4d55f080a79 100644 (file)
@@ -425,7 +425,6 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = {
        .enablebits     = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
        .port_ops       = &via_port_ops,
        .host_flags     = IDE_HFLAG_PIO_NO_BLACKLIST |
-                         IDE_HFLAG_ABUSE_SET_DMA_MODE |
                          IDE_HFLAG_POST_SET_MODE |
                          IDE_HFLAG_IO_32BIT,
        .pio_mask       = ATA_PIO5,
index 93fb9067c0430f7add87b99fa41f39f31ba0d4c5..c521bf6e1bf2760f4034ac034fefe005ac5699ea 100644 (file)
@@ -48,6 +48,8 @@
 #include <asm/mediabay.h>
 #endif
 
+#define DRV_NAME "ide-pmac"
+
 #undef IDE_PMAC_DEBUG
 
 #define DMA_WAIT_TIMEOUT       50
@@ -424,7 +426,9 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
 static void
 pmac_ide_selectproc(ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
 
        if (pmif == NULL)
                return;
@@ -444,7 +448,9 @@ pmac_ide_selectproc(ide_drive_t *drive)
 static void
 pmac_ide_kauai_selectproc(ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
 
        if (pmif == NULL)
                return;
@@ -465,7 +471,9 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
 static void
 pmac_ide_do_update_timings(ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
 
        if (pmif == NULL)
                return;
@@ -478,12 +486,26 @@ pmac_ide_do_update_timings(ide_drive_t *drive)
                pmac_ide_selectproc(drive);
 }
 
-static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
+static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
 {
-       u32 tmp;
-       
-       writeb(value, (void __iomem *) port);
-       tmp = readl((void __iomem *)(hwif->io_ports.data_addr
+       writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+       (void)readl((void __iomem *)(hwif->io_ports.data_addr
+                                    + IDE_TIMING_CONFIG));
+}
+
+static void pmac_set_irq(ide_hwif_t *hwif, int on)
+{
+       u8 ctl = ATA_DEVCTL_OBS;
+
+       if (on == 4) { /* hack for SRST */
+               ctl |= 4;
+               on &= ~4;
+       }
+
+       ctl |= on ? 0 : 2;
+
+       writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+       (void)readl((void __iomem *)(hwif->io_ports.data_addr
                                     + IDE_TIMING_CONFIG));
 }
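
pmac_exec_command() and pmac_set_irq() both end with a throwaway readl() of
the timing-config register for the same reason the removed pmac_outbsync()
did: writes to these MMIO-mapped cells may be posted, and a read-back from
the same cell forces the write out before the driver continues. The generic
pattern:

        writeb(val, mmio_reg);                  /* may be posted */
        (void)readl(same_cell_reg);             /* read back to flush */
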
 
@@ -493,11 +515,13 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
 static void
 pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
 {
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
        u32 *timings, t;
        unsigned accessTicks, recTicks;
        unsigned accessTime, recTime;
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        unsigned int cycle_time;
 
        if (pmif == NULL)
@@ -778,9 +802,11 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
 
 static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 {
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        int unit = (drive->select.b.unit & 0x01);
        int ret = 0;
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
        u32 *timings, *timings2, tl[2];
 
        timings = &pmif->timings[unit];
@@ -852,11 +878,8 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
 /* Suspend call back, should be called after the child devices
  * have actually been suspended
  */
-static int
-pmac_ide_do_suspend(ide_hwif_t *hwif)
+static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
 {
-       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
-       
        /* We clear the timings */
        pmif->timings[0] = 0;
        pmif->timings[1] = 0;
@@ -884,11 +907,8 @@ pmac_ide_do_suspend(ide_hwif_t *hwif)
 /* Resume call back, should be called before the child devices
  * are resumed
  */
-static int
-pmac_ide_do_resume(ide_hwif_t *hwif)
+static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
 {
-       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
-       
        /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
        if (!pmif->mediabay) {
                ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
@@ -916,7 +936,8 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
 
 static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
 {
-       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)ide_get_hwifdata(hwif);
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        struct device_node *np = pmif->node;
        const char *cable = of_get_property(np, "cable-type", NULL);
 
@@ -936,7 +957,40 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
        return ATA_CBL_PATA40;
 }
 
+static void pmac_ide_init_dev(ide_drive_t *drive)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+
+       if (pmif->mediabay) {
+#ifdef CONFIG_PMAC_MEDIABAY
+               if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
+                       drive->noprobe = 0;
+                       return;
+               }
+#endif
+               drive->noprobe = 1;
+       }
+}
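
This new init_dev hook moves the media-bay decision from interface setup to
per-drive init: a drive behind a media bay defaults to noprobe, and with
CONFIG_PMAC_MEDIABAY it is probed after all when check_media_bay_by_base()
reports a CD present. It replaces the hwif->drives[0/1].noprobe block
deleted from pmac_ide_setup_device() further down.
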
+
+static const struct ide_tp_ops pmac_tp_ops = {
+       .exec_command           = pmac_exec_command,
+       .read_status            = ide_read_status,
+       .read_altstatus         = ide_read_altstatus,
+       .read_sff_dma_status    = ide_read_sff_dma_status,
+
+       .set_irq                = pmac_set_irq,
+
+       .tf_load                = ide_tf_load,
+       .tf_read                = ide_tf_read,
+
+       .input_data             = ide_input_data,
+       .output_data            = ide_output_data,
+};
+
 static const struct ide_port_ops pmac_ide_ata6_port_ops = {
+       .init_dev               = pmac_ide_init_dev,
        .set_pio_mode           = pmac_ide_set_pio_mode,
        .set_dma_mode           = pmac_ide_set_dma_mode,
        .selectproc             = pmac_ide_kauai_selectproc,
@@ -944,6 +998,7 @@ static const struct ide_port_ops pmac_ide_ata6_port_ops = {
 };
 
 static const struct ide_port_ops pmac_ide_ata4_port_ops = {
+       .init_dev               = pmac_ide_init_dev,
        .set_pio_mode           = pmac_ide_set_pio_mode,
        .set_dma_mode           = pmac_ide_set_dma_mode,
        .selectproc             = pmac_ide_selectproc,
@@ -951,6 +1006,7 @@ static const struct ide_port_ops pmac_ide_ata4_port_ops = {
 };
 
 static const struct ide_port_ops pmac_ide_port_ops = {
+       .init_dev               = pmac_ide_init_dev,
        .set_pio_mode           = pmac_ide_set_pio_mode,
        .set_dma_mode           = pmac_ide_set_dma_mode,
        .selectproc             = pmac_ide_selectproc,
@@ -959,12 +1015,14 @@ static const struct ide_port_ops pmac_ide_port_ops = {
 static const struct ide_dma_ops pmac_dma_ops;
 
 static const struct ide_port_info pmac_port_info = {
+       .name                   = DRV_NAME,
        .init_dma               = pmac_ide_init_dma,
        .chipset                = ide_pmac,
+       .tp_ops                 = &pmac_tp_ops,
+       .port_ops               = &pmac_ide_port_ops,
 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
        .dma_ops                = &pmac_dma_ops,
 #endif
-       .port_ops               = &pmac_ide_port_ops,
        .host_flags             = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
                                  IDE_HFLAG_POST_SET_MODE |
                                  IDE_HFLAG_MMIO |
@@ -977,13 +1035,15 @@ static const struct ide_port_info pmac_port_info = {
  * Setup, register & probe an IDE channel driven by this driver, this is
  * called by one of the 2 probe functions (macio or PCI).
  */
-static int __devinit
-pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
+static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
 {
        struct device_node *np = pmif->node;
        const int *bidp;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       struct ide_host *host;
+       ide_hwif_t *hwif;
+       hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
        struct ide_port_info d = pmac_port_info;
+       int rc;
 
        pmif->broken_dma = pmif->broken_dma_warn = 0;
        if (of_device_is_compatible(np, "shasta-ata")) {
@@ -1054,31 +1114,16 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
                msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
        }
 
-       /* Setup MMIO ops */
-       default_hwif_mmiops(hwif);
-               hwif->OUTBSYNC = pmac_outbsync;
+       printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
+                        "bus ID %d%s, irq %d\n", model_name[pmif->kind],
+                        pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
+                        pmif->mediabay ? " (mediabay)" : "", hw->irq);
 
-       hwif->hwif_data = pmif;
-       ide_init_port_hw(hwif, hw);
+       rc = ide_host_add(&d, hws, &host);
+       if (rc)
+               return rc;
 
-       printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
-              hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
-              pmif->mediabay ? " (mediabay)" : "", hwif->irq);
-
-       if (pmif->mediabay) {
-#ifdef CONFIG_PMAC_MEDIABAY
-               if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
-#else
-               if (1) {
-#endif
-                       hwif->drives[0].noprobe = 1;
-                       hwif->drives[1].noprobe = 1;
-               }
-       }
-
-       idx[0] = hwif->index;
-
-       ide_device_add(idx, &d);
+       hwif = host->ports[0];
 
        return 0;
 }
@@ -1101,7 +1146,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
        void __iomem *base;
        unsigned long regbase;
-       ide_hwif_t *hwif;
        pmac_ide_hwif_t *pmif;
        int irq, rc;
        hw_regs_t hw;
@@ -1110,14 +1154,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
        if (pmif == NULL)
                return -ENOMEM;
 
-       hwif = ide_find_port();
-       if (hwif == NULL) {
-               printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
-               printk(KERN_ERR "          %s\n", mdev->ofdev.node->full_name);
-               rc = -ENODEV;
-               goto out_free_pmif;
-       }
-
        if (macio_resource_count(mdev) == 0) {
                printk(KERN_WARNING "ide-pmac: no address for %s\n",
                                    mdev->ofdev.node->full_name);
@@ -1164,7 +1200,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
        } else
                pmif->dma_regs = NULL;
 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-       dev_set_drvdata(&mdev->ofdev.dev, hwif);
+       dev_set_drvdata(&mdev->ofdev.dev, pmif);
 
        memset(&hw, 0, sizeof(hw));
        pmac_ide_init_ports(&hw, pmif->regbase);
@@ -1172,7 +1208,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
        hw.dev = &mdev->bus->pdev->dev;
        hw.parent = &mdev->ofdev.dev;
 
-       rc = pmac_ide_setup_device(pmif, hwif, &hw);
+       rc = pmac_ide_setup_device(pmif, &hw);
        if (rc != 0) {
                /* The interface is released to the common IDE layer */
                dev_set_drvdata(&mdev->ofdev.dev, NULL);
@@ -1195,12 +1231,13 @@ out_free_pmif:
 static int
 pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
 {
-       ide_hwif_t      *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
-       int             rc = 0;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+       int rc = 0;
 
        if (mesg.event != mdev->ofdev.dev.power.power_state.event
                        && (mesg.event & PM_EVENT_SLEEP)) {
-               rc = pmac_ide_do_suspend(hwif);
+               rc = pmac_ide_do_suspend(pmif);
                if (rc == 0)
                        mdev->ofdev.dev.power.power_state = mesg;
        }
@@ -1211,11 +1248,12 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
 static int
 pmac_ide_macio_resume(struct macio_dev *mdev)
 {
-       ide_hwif_t      *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
-       int             rc = 0;
-       
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+       int rc = 0;
+
        if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
-               rc = pmac_ide_do_resume(hwif);
+               rc = pmac_ide_do_resume(pmif);
                if (rc == 0)
                        mdev->ofdev.dev.power.power_state = PMSG_ON;
        }
@@ -1229,7 +1267,6 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
 static int __devinit
 pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       ide_hwif_t *hwif;
        struct device_node *np;
        pmac_ide_hwif_t *pmif;
        void __iomem *base;
@@ -1247,14 +1284,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        if (pmif == NULL)
                return -ENOMEM;
 
-       hwif = ide_find_port();
-       if (hwif == NULL) {
-               printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
-               printk(KERN_ERR "          %s\n", np->full_name);
-               rc = -ENODEV;
-               goto out_free_pmif;
-       }
-
        if (pci_enable_device(pdev)) {
                printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
                                    "%s\n", np->full_name);
@@ -1284,14 +1313,14 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        pmif->kauai_fcr = base;
        pmif->irq = pdev->irq;
 
-       pci_set_drvdata(pdev, hwif);
+       pci_set_drvdata(pdev, pmif);
 
        memset(&hw, 0, sizeof(hw));
        pmac_ide_init_ports(&hw, pmif->regbase);
        hw.irq = pdev->irq;
        hw.dev = &pdev->dev;
 
-       rc = pmac_ide_setup_device(pmif, hwif, &hw);
+       rc = pmac_ide_setup_device(pmif, &hw);
        if (rc != 0) {
                /* The interface is released to the common IDE layer */
                pci_set_drvdata(pdev, NULL);
@@ -1310,12 +1339,12 @@ out_free_pmif:
 static int
 pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
-       ide_hwif_t      *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
-       int             rc = 0;
-       
+       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
+       int rc = 0;
+
        if (mesg.event != pdev->dev.power.power_state.event
                        && (mesg.event & PM_EVENT_SLEEP)) {
-               rc = pmac_ide_do_suspend(hwif);
+               rc = pmac_ide_do_suspend(pmif);
                if (rc == 0)
                        pdev->dev.power.power_state = mesg;
        }
@@ -1326,11 +1355,11 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 static int
 pmac_ide_pci_resume(struct pci_dev *pdev)
 {
-       ide_hwif_t      *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
-       int             rc = 0;
-       
+       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
+       int rc = 0;
+
        if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
-               rc = pmac_ide_do_resume(hwif);
+               rc = pmac_ide_do_resume(pmif);
                if (rc == 0)
                        pdev->dev.power.power_state = PMSG_ON;
        }
@@ -1421,10 +1450,11 @@ out:
 static int
 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 {
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        struct dbdma_cmd *table;
        int i, count = 0;
-       ide_hwif_t *hwif = HWIF(drive);
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
        volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
        struct scatterlist *sg;
        int wr = (rq_data_dir(rq) == WRITE);
@@ -1520,7 +1550,8 @@ static int
 pmac_ide_dma_setup(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        struct request *rq = HWGROUP(drive)->rq;
        u8 unit = (drive->select.b.unit & 0x01);
        u8 ata4;
@@ -1560,7 +1591,9 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 static void
 pmac_ide_dma_start(ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        volatile struct dbdma_regs __iomem *dma;
 
        dma = pmif->dma_regs;
@@ -1576,7 +1609,9 @@ pmac_ide_dma_start(ide_drive_t *drive)
 static int
 pmac_ide_dma_end (ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        volatile struct dbdma_regs __iomem *dma;
        u32 dstat;
        
@@ -1604,7 +1639,9 @@ pmac_ide_dma_end (ide_drive_t *drive)
 static int
 pmac_ide_dma_test_irq (ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        volatile struct dbdma_regs __iomem *dma;
        unsigned long status, timeout;
 
@@ -1664,7 +1701,9 @@ static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
 static void
 pmac_ide_dma_lost_irq (ide_drive_t *drive)
 {
-       pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+       ide_hwif_t *hwif = drive->hwif;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        volatile struct dbdma_regs __iomem *dma;
        unsigned long status;
 
@@ -1694,7 +1733,8 @@ static const struct ide_dma_ops pmac_dma_ops = {
 static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
                                       const struct ide_port_info *d)
 {
-       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
+       pmac_ide_hwif_t *pmif =
+               (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
        struct pci_dev *dev = to_pci_dev(hwif->dev);
 
        /* We won't need pci_dev if we switch to generic consistent
index 65fc08b6b6d0e813f4eed2e5594c1149a5233d7b..b15cad58dc81ea781b4ac7b366759d6ab3de20e8 100644 (file)
@@ -73,15 +73,12 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
  *     @d: IDE port info
  *
  *     Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
- *     Where a device has a partner that is already in DMA mode we check
- *     and enforce IDE simplex rules.
  */
 
 unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long dma_base = 0;
-       u8 dma_stat = 0;
 
        if (hwif->host_flags & IDE_HFLAG_MMIO)
                return hwif->dma_base;
@@ -102,11 +99,19 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
        if (hwif->channel)
                dma_base += 8;
 
-       if (d->host_flags & IDE_HFLAG_CS5520)
+       return dma_base;
+}
+EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+
+int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+       u8 dma_stat;
+
+       if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
                goto out;
 
        if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
-               ide_pci_clear_simplex(dma_base, d->name);
+               ide_pci_clear_simplex(hwif->dma_base, d->name);
                goto out;
        }
 
@@ -120,15 +125,15 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
         * we tune the drive then try to grab DMA ownership if we want to be
         * the DMA end.  This has to become dynamic to handle hot-plug.
         */
-       dma_stat = hwif->INB(dma_base + 2);
+       dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
        if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
                printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name);
-               dma_base = 0;
+               return -1;
        }
 out:
-       return dma_base;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
 
 /*
  * Set up BM-DMA capability (PnP BIOS should have done this)
@@ -284,33 +289,31 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
 }
 
 /**
- *     ide_hwif_configure      -       configure an IDE interface
+ *     ide_hw_configure        -       configure a hw_regs_t instance
  *     @dev: PCI device holding interface
  *     @d: IDE port info
  *     @port: port number
  *     @irq: PCI IRQ
+ *     @hw: hw_regs_t instance corresponding to this port
  *
  *     Perform the initial set up for the hardware interface structure. This
  *     is done per interface port rather than per PCI device. There may be
  *     more than one port per device.
  *
- *     Returns the new hardware interface structure, or NULL on a failure
+ *     Returns zero on success or an error code.
  */
 
-static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
-                                     const struct ide_port_info *d,
-                                     unsigned int port, int irq)
+static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
+                           unsigned int port, int irq, hw_regs_t *hw)
 {
        unsigned long ctl = 0, base = 0;
-       ide_hwif_t *hwif;
-       struct hw_regs_s hw;
 
        if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
                if (ide_pci_check_iomem(dev, d, 2 * port) ||
                    ide_pci_check_iomem(dev, d, 2 * port + 1)) {
                        printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
                                        "as MEM for port %d!\n", d->name, port);
-                       return NULL;
+                       return -EINVAL;
                }
 
                ctl  = pci_resource_start(dev, 2*port+1);
@@ -324,22 +327,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
        if (!base || !ctl) {
                printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n",
                                d->name, port);
-               return NULL;
+               return -EINVAL;
        }
 
-       hwif = ide_find_port_slot(d);
-       if (hwif == NULL)
-               return NULL;
-
-       memset(&hw, 0, sizeof(hw));
-       hw.irq = irq;
-       hw.dev = &dev->dev;
-       hw.chipset = d->chipset ? d->chipset : ide_pci;
-       ide_std_init_ports(&hw, base, ctl | 2);
-
-       ide_init_port_hw(hwif, &hw);
+       memset(hw, 0, sizeof(*hw));
+       hw->irq = irq;
+       hw->dev = &dev->dev;
+       hw->chipset = d->chipset ? d->chipset : ide_pci;
+       ide_std_init_ports(hw, base, ctl | 2);
 
-       return hwif;
+       return 0;
 }
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -362,7 +359,15 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
             (dev->class & 0x80))) {
                unsigned long base = ide_pci_dma_base(hwif, d);
 
-               if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+               if (base == 0)
+                       return -1;
+
+               hwif->dma_base = base;
+
+               if (ide_pci_check_simplex(hwif, d) < 0)
+                       return -1;
+
+               if (ide_pci_set_master(dev, d->name) < 0)
                        return -1;
 
                if (hwif->host_flags & IDE_HFLAG_MMIO)
@@ -376,7 +381,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
                if (ide_allocate_dma_engine(hwif))
                        return -1;
 
-               ide_setup_dma(hwif, base);
+               hwif->dma_ops = &sff_dma_ops;
        }
 
        return 0;
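
Note the ordering this split enforces: hwif->dma_base must be assigned
before ide_pci_check_simplex() runs, because the check now reads the status
through tp_ops->read_sff_dma_status(), which derives its port from
dma_base. The simplex test itself keys off bit 7:

        /* bit 7 (0x80) of the SFF-8038i DMA status register = simplex-only;
         * if the mate port already owns a DMA engine, this port gets PIO
         */
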
@@ -429,7 +434,8 @@ out:
  *     @dev: PCI device
  *     @d: IDE port info
  *     @pciirq: IRQ line
- *     @idx: ATA index table to update
+ *     @hw: hw_regs_t instances corresponding to this PCI IDE device
+ *     @hws: hw_regs_t pointers table to update
  *
  *     Scan the interfaces attached to this device and do any
  *     necessary per port setup. Attach the devices and ask the
@@ -440,10 +446,10 @@ out:
  *     where the chipset setup is not the default PCI IDE one.
  */
 
-void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int pciirq, u8 *idx)
+void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
+                        int pciirq, hw_regs_t *hw, hw_regs_t **hws)
 {
        int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
-       ide_hwif_t *hwif;
        u8 tmp;
 
        /*
@@ -459,11 +465,10 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
                        continue;       /* port not enabled */
                }
 
-               hwif = ide_hwif_configure(dev, d, port, pciirq);
-               if (hwif == NULL)
+               if (ide_hw_configure(dev, d, port, pciirq, hw + port))
                        continue;
 
-               *(idx + port) = hwif->index;
+               *(hws + port) = hw + port;
        }
 }
 EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
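
The hws[] table replaces the old idx[] of hwif indexes: ide_pci_setup_ports()
fills a slot only for ports that exist and are enabled, so a disabled port
simply stays NULL, left for the core to skip at registration. For a two-port
device with port 1 disabled the caller ends up with, illustratively:

        hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };

        ide_pci_setup_ports(dev, d, pciirq, &hw[0], &hws[0]);
        /* now hws[0] == &hw[0], hws[1] == NULL -> port 1 never registered */
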
@@ -480,7 +485,7 @@ EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
  */
 static int do_ide_setup_pci_device(struct pci_dev *dev,
                                   const struct ide_port_info *d,
-                                  u8 *idx, u8 noisy)
+                                  u8 noisy)
 {
        int tried_config = 0;
        int pciirq, ret;
@@ -529,22 +534,24 @@ static int do_ide_setup_pci_device(struct pci_dev *dev,
                                d->name, pciirq);
        }
 
-       /* FIXME: silent failure can happen */
-
-       ide_pci_setup_ports(dev, d, pciirq, idx);
+       ret = pciirq;
 out:
        return ret;
 }
 
 int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
 {
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
        int ret;
 
-       ret = do_ide_setup_pci_device(dev, d, &idx[0], 1);
+       ret = do_ide_setup_pci_device(dev, d, 1);
+
+       if (ret >= 0) {
+               /* FIXME: silent failure can happen */
+               ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]);
 
-       if (ret >= 0)
-               ide_device_add(idx, d);
+               ret = ide_host_add(d, hws, NULL);
+       }
 
        return ret;
 }
@@ -555,19 +562,23 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
 {
        struct pci_dev *pdev[] = { dev1, dev2 };
        int ret, i;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
 
        for (i = 0; i < 2; i++) {
-               ret = do_ide_setup_pci_device(pdev[i], d, &idx[i*2], !i);
+               ret = do_ide_setup_pci_device(pdev[i], d, !i);
+
                /*
                 * FIXME: Mom, mom, they stole from me the helper function to undo
                 * do_ide_setup_pci_device() on the first device!
                 */
                if (ret < 0)
                        goto out;
+
+               /* FIXME: silent failure can happen */
+               ide_pci_setup_ports(pdev[i], d, ret, &hw[i*2], &hws[i*2]);
        }
 
-       ide_device_add(idx, d);
+       ret = ide_host_add(d, hws, NULL);
 out:
        return ret;
 }
index f843c1383a4b599480fdd53c9887a1263132e294..538552495d486da9f896e5cdc3067e360ea27109 100644 (file)
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj {
        struct Scsi_Host        *host;
 
        struct ide_atapi_pc *pc;                /* Current packet command */
-       unsigned long flags;                    /* Status/Action flags */
        unsigned long transform;                /* SCSI cmd translation layer */
        unsigned long log;                      /* log flags */
 } idescsi_scsi_t;
@@ -125,16 +124,6 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
        return scsihost_to_idescsi(ide_drive->driver_data);
 }
 
-/*
- *     Per ATAPI device status bits.
- */
-#define IDESCSI_DRQ_INTERRUPT          0       /* DRQ interrupt device */
-
-/*
- *     ide-scsi requests.
- */
-#define IDESCSI_PC_RQ                  90
-
 /*
  *     PIO data transfer routine using the scatter gather table.
  */
@@ -142,7 +131,8 @@ static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                unsigned int bcount, int write)
 {
        ide_hwif_t *hwif = drive->hwif;
-       xfer_func_t *xf = write ? hwif->output_data : hwif->input_data;
+       const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+       xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
        char *buf;
        int count;
 
@@ -228,7 +218,6 @@ static int idescsi_check_condition(ide_drive_t *drive,
        rq->cmd_type = REQ_TYPE_SENSE;
        rq->cmd_flags |= REQ_PREEMPT;
        pc->timeout = jiffies + WAIT_READY;
-       pc->callback = ide_scsi_callback;
        /* NOTE! Save the failed packet command in "rq->buffer" */
        rq->buffer = (void *) failed_cmd->special;
        pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -237,6 +226,7 @@ static int idescsi_check_condition(ide_drive_t *drive,
                ide_scsi_hex_dump(pc->c, 6);
        }
        rq->rq_disk = scsi->disk;
+       memcpy(rq->cmd, pc->c, 12);
        ide_do_drive_cmd(drive, rq);
        return 0;
 }
@@ -246,10 +236,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
 {
        ide_hwif_t *hwif = drive->hwif;
 
-       if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+       if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
                /* force an abort */
-               hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
-                              hwif->io_ports.command_addr);
+               hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
 
        rq->errors++;
 
@@ -421,10 +410,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
 
        if (blk_sense_request(rq) || blk_special_request(rq)) {
                struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
-               idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-
-               if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
-                       pc->flags |= PC_FLAG_DRQ_INTERRUPT;
 
                if (drive->using_dma && !idescsi_map_sg(drive, pc))
                        pc->flags |= PC_FLAG_DMA_OK;
@@ -460,11 +445,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
 static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
 {
        if (drive->id && (drive->id->config & 0x0060) == 0x20)
-               set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags);
+               set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
        clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
 #if IDESCSI_DEBUG_LOG
        set_bit(IDESCSI_LOG_CMD, &scsi->log);
 #endif /* IDESCSI_DEBUG_LOG */
+
+       drive->pc_callback = ide_scsi_callback;
+
        idescsi_add_settings(drive);
 }
 
@@ -616,7 +604,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
        pc->scsi_cmd = cmd;
        pc->done = done;
        pc->timeout = jiffies + cmd->timeout_per_command;
-       pc->callback = ide_scsi_callback;
 
        if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
                printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -631,6 +618,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
        rq->special = (char *) pc;
        rq->cmd_type = REQ_TYPE_SPECIAL;
        spin_unlock_irq(host->host_lock);
+       memcpy(rq->cmd, pc->c, 12);
        blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
        spin_lock_irq(host->host_lock);
        return 0;
index 93e407ee08b91a97614103cb9520c307c4c7fd53..1ff80de177db21be26be01ffa35831854821e97d 100644 (file)
@@ -201,6 +201,10 @@ static void cpm_uart_int_tx(struct uart_port *port)
        cpm_uart_tx_pump(port);
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_polled;
+#endif
+
 /*
  * Receive characters
  */
@@ -222,6 +226,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
         */
        bdp = pinfo->rx_cur;
        for (;;) {
+#ifdef CONFIG_CONSOLE_POLL
+               if (unlikely(serial_polled)) {
+                       serial_polled = 0;
+                       return;
+               }
+#endif
                /* get status */
                status = in_be16(&bdp->cbd_sc);
                /* If this one is empty, return happy */
@@ -253,7 +263,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
                                goto handle_error;
                        if (uart_handle_sysrq_char(port, ch))
                                continue;
-
+#ifdef CONFIG_CONSOLE_POLL
+                       if (unlikely(serial_polled)) {
+                               serial_polled = 0;
+                               return;
+                       }
+#endif
                      error_return:
                        tty_insert_flip_char(tty, ch, flg);
 
@@ -865,6 +880,80 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
                cpm_uart_request_port(port);
        }
 }
+
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+#define GDB_BUF_SIZE   512     /* power of 2, please */
+
+static char poll_buf[GDB_BUF_SIZE];
+static char *pollp;
+static int poll_chars;
+
+static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
+{
+       u_char          c, *cp;
+       volatile cbd_t  *bdp;
+       int             i;
+
+       /* Get the address of the host memory buffer.
+        */
+       bdp = pinfo->rx_cur;
+       while (bdp->cbd_sc & BD_SC_EMPTY)
+               ;
+
+       /* If the buffer address is in the CPM DPRAM, don't
+        * convert it.
+        */
+       cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
+
+       if (obuf) {
+               i = c = bdp->cbd_datlen;
+               while (i-- > 0)
+                       *obuf++ = *cp++;
+       } else
+               c = *cp;
+       bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID);
+       bdp->cbd_sc |= BD_SC_EMPTY;
+
+       if (bdp->cbd_sc & BD_SC_WRAP)
+               bdp = pinfo->rx_bd_base;
+       else
+               bdp++;
+       pinfo->rx_cur = (cbd_t *)bdp;
+
+       return (int)c;
+}
+
+static int cpm_get_poll_char(struct uart_port *port)
+{
+       struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+       if (!serial_polled) {
+               serial_polled = 1;
+               poll_chars = 0;
+       }
+       if (poll_chars <= 0) {
+               poll_chars = poll_wait_key(poll_buf, pinfo);
+               pollp = poll_buf;
+       }
+       poll_chars--;
+       return *pollp++;
+}
+
+static void cpm_put_poll_char(struct uart_port *port,
+                        unsigned char c)
+{
+       struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+       static char ch[2];
+
+       ch[0] = (char)c;
+       cpm_uart_early_write(pinfo->port.line, ch, 1);
+}
+#endif /* CONFIG_CONSOLE_POLL */
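
The get/put pair above layers a byte-at-a-time stream on top of the buffer-descriptor ring: poll_wait_key() drains one whole BD into poll_buf, and cpm_get_poll_char() hands the bytes out one by one, refilling only when the buffer runs dry. A standalone sketch of that drain-and-refill pattern (plain userspace C; fake_wait_key() is a stand-in for the real BD read):

#include <stdio.h>
#include <string.h>

#define GDB_BUF_SIZE 512

static char poll_buf[GDB_BUF_SIZE];
static char *pollp;
static int poll_chars;

/* Stand-in for poll_wait_key(): copies one received frame into obuf
 * and returns its length.  Here it just fakes a 3-byte frame. */
static int fake_wait_key(char *obuf)
{
        memcpy(obuf, "abc", 3);
        return 3;
}

static int get_poll_char(void)
{
        if (poll_chars <= 0) {          /* buffer drained: refill */
                poll_chars = fake_wait_key(poll_buf);
                pollp = poll_buf;
        }
        poll_chars--;
        return *pollp++;                /* hand out one character */
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                putchar(get_poll_char());
        putchar('\n');                  /* prints "abcabc" */
        return 0;
}
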
+
 static struct uart_ops cpm_uart_pops = {
        .tx_empty       = cpm_uart_tx_empty,
        .set_mctrl      = cpm_uart_set_mctrl,
@@ -882,6 +971,10 @@ static struct uart_ops cpm_uart_pops = {
        .request_port   = cpm_uart_request_port,
        .config_port    = cpm_uart_config_port,
        .verify_port    = cpm_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char = cpm_get_poll_char,
+       .poll_put_char = cpm_put_poll_char,
+#endif
 };
 
 struct uart_cpm_port cpm_uart_ports[UART_NR];
index c9f53e71f252eb2833db26f645a9621f0f68f7ba..61d3ade5286c6a50a55d7735b03f93284a631de4 100644 (file)
@@ -921,6 +921,10 @@ static int mpsc_make_ready(struct mpsc_port_info *pi)
        return 0;
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_polled;
+#endif
+
 /*
  ******************************************************************************
  *
@@ -956,7 +960,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
        while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
                                & SDMA_DESC_CMDSTAT_O)) {
                bytes_in = be16_to_cpu(rxre->bytecnt);
-
+#ifdef CONFIG_CONSOLE_POLL
+               if (unlikely(serial_polled)) {
+                       serial_polled = 0;
+                       return 0;
+               }
+#endif
                /* Following use of tty struct directly is deprecated */
                if (unlikely(tty_buffer_request_room(tty, bytes_in)
                                        < bytes_in)) {
@@ -1017,6 +1026,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
                if (uart_handle_sysrq_char(&pi->port, *bp)) {
                        bp++;
                        bytes_in--;
+#ifdef CONFIG_CONSOLE_POLL
+                       if (unlikely(serial_polled)) {
+                               serial_polled = 0;
+                               return 0;
+                       }
+#endif
                        goto next_frame;
                }
 
@@ -1519,6 +1534,133 @@ static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
 
        return rc;
 }
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static char poll_buf[2048];
+static int poll_ptr;
+static int poll_cnt;
+static void mpsc_put_poll_char(struct uart_port *port,
+                                                          unsigned char c);
+
+static int mpsc_get_poll_char(struct uart_port *port)
+{
+       struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+       struct mpsc_rx_desc *rxre;
+       u32     cmdstat, bytes_in, i;
+       u8      *bp;
+
+       if (!serial_polled)
+               serial_polled = 1;
+
+       pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
+
+       if (poll_cnt) {
+               poll_cnt--;
+               return poll_buf[poll_ptr++];
+       }
+       poll_ptr = 0;
+       poll_cnt = 0;
+
+       while (poll_cnt == 0) {
+               rxre = (struct mpsc_rx_desc *)(pi->rxr +
+                      (pi->rxr_posn*MPSC_RXRE_SIZE));
+               dma_cache_sync(pi->port.dev, (void *)rxre,
+                              MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+               if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+                       invalidate_dcache_range((ulong)rxre,
+                       (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+               /*
+                * Loop through Rx descriptors handling ones that have
+                * been completed.
+                */
+               while (poll_cnt == 0 &&
+                      !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
+                        SDMA_DESC_CMDSTAT_O)){
+                       bytes_in = be16_to_cpu(rxre->bytecnt);
+                       bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
+                       dma_cache_sync(pi->port.dev, (void *) bp,
+                                      MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+                       if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+                               invalidate_dcache_range((ulong)bp,
+                                       (ulong)bp + MPSC_RXBE_SIZE);
+#endif
+                       if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
+                        SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
+                               !(cmdstat & pi->port.ignore_status_mask)) {
+                               poll_buf[poll_cnt] = *bp;
+                               poll_cnt++;
+                       } else {
+                               for (i = 0; i < bytes_in; i++) {
+                                       poll_buf[poll_cnt] = *bp++;
+                                       poll_cnt++;
+                               }
+                               pi->port.icount.rx += bytes_in;
+                       }
+                       rxre->bytecnt = cpu_to_be16(0);
+                       wmb();
+                       rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
+                                                   SDMA_DESC_CMDSTAT_EI |
+                                                   SDMA_DESC_CMDSTAT_F |
+                                                   SDMA_DESC_CMDSTAT_L);
+                       wmb();
+                       dma_cache_sync(pi->port.dev, (void *)rxre,
+                                      MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+                       if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+                               flush_dcache_range((ulong)rxre,
+                                          (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+
+                       /* Advance to next descriptor */
+                       pi->rxr_posn = (pi->rxr_posn + 1) &
+                               (MPSC_RXR_ENTRIES - 1);
+                       rxre = (struct mpsc_rx_desc *)(pi->rxr +
+                                      (pi->rxr_posn * MPSC_RXRE_SIZE));
+                       dma_cache_sync(pi->port.dev, (void *)rxre,
+                                      MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+                       if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+                               invalidate_dcache_range((ulong)rxre,
+                                               (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+               }
+
+               /* Restart the rx engine, if it's stopped */
+               if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
+                       mpsc_start_rx(pi);
+       }
+       if (poll_cnt) {
+               poll_cnt--;
+               return poll_buf[poll_ptr++];
+       }
+
+       return 0;
+}
+
+
+static void mpsc_put_poll_char(struct uart_port *port,
+                        unsigned char c)
+{
+       struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+       u32 data;
+
+       data = readl(pi->mpsc_base + MPSC_MPCR);
+       writeb(c, pi->mpsc_base + MPSC_CHR_1);
+       mb();
+       data = readl(pi->mpsc_base + MPSC_CHR_2);
+       data |= MPSC_CHR_2_TTCS;
+       writel(data, pi->mpsc_base + MPSC_CHR_2);
+       mb();
+
+       while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
+}
+#endif
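
mpsc_get_poll_char() walks the Rx descriptor ring the same way the interrupt handler does, advancing with (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1). That masked increment is branch-free wraparound and is correct only when the ring size is a power of two; a minimal standalone check (the value 32 is illustrative, the driver defines MPSC_RXR_ENTRIES):

#include <assert.h>
#include <stdio.h>

#define RXR_ENTRIES 32  /* must be a power of two for the mask trick */

int main(void)
{
        /* power-of-two test: exactly one bit set */
        assert((RXR_ENTRIES & (RXR_ENTRIES - 1)) == 0);

        unsigned posn = RXR_ENTRIES - 2;
        for (int i = 0; i < 4; i++) {
                printf("%u\n", posn);   /* 30, 31, 0, 1 -- wraps without a branch */
                posn = (posn + 1) & (RXR_ENTRIES - 1);
        }
        return 0;
}
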
 
 static struct uart_ops mpsc_pops = {
        .tx_empty       = mpsc_tx_empty,
@@ -1537,6 +1679,10 @@ static struct uart_ops mpsc_pops = {
        .request_port   = mpsc_request_port,
        .config_port    = mpsc_config_port,
        .verify_port    = mpsc_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char = mpsc_get_poll_char,
+       .poll_put_char = mpsc_put_poll_char,
+#endif
 };
 
 /*
index 90d14ee564f567c2515dbced773eebda2d494063..ef4f5da2029f30af6e493e4ff9082d34abd0c40c 100644 (file)
@@ -198,17 +198,13 @@ iop_chan_memset_slot_count(size_t len, int *slots_per_op)
 static inline int
 iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
 {
-       int num_slots;
-       /* slots_to_find = 1 for basic descriptor + 1 per 4 sources above 1
-        * (1 source => 8 bytes) (1 slot => 32 bytes)
-        */
-       num_slots = 1 + (((src_cnt - 1) << 3) >> 5);
-       if (((src_cnt - 1) << 3) & 0x1f)
-               num_slots++;
-
-       *slots_per_op = num_slots;
-
-       return num_slots;
+       static const char slot_count_table[] = { 1, 2, 2, 2,
+                                                2, 3, 3, 3,
+                                                3, 4, 4, 4,
+                                                4, 5, 5, 5,
+                                               };
+       *slots_per_op = slot_count_table[src_cnt - 1];
+       return *slots_per_op;
 }
 
 #define ADMA_MAX_BYTE_COUNT    (16 * 1024 * 1024)
index a32b86ac62aa4f4e325dd7c035270a88b072c392..af64676650a22b7f6817d0d809d4fc6d82d8966d 100644 (file)
@@ -260,7 +260,7 @@ static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
 static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
                                        int *slots_per_op)
 {
-       static const int slot_count_table[] = { 0,
+       static const char slot_count_table[] = {
                                                1, 1, 1, 1, /* 01 - 04 */
                                                2, 2, 2, 2, /* 05 - 08 */
                                                4, 4, 4, 4, /* 09 - 12 */
@@ -270,7 +270,7 @@ static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
                                                8, 8, 8, 8, /* 25 - 28 */
                                                8, 8, 8, 8, /* 29 - 32 */
                                              };
-       *slots_per_op = slot_count_table[src_cnt];
+       *slots_per_op = slot_count_table[src_cnt - 1];
        return *slots_per_op;
 }
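
Both hunks above touch the XOR slot-count lookup. The first replaces shift-and-round arithmetic with a precomputed table; the second drops the dummy leading 0 entry and re-indexes by src_cnt - 1, leaving behavior unchanged while shrinking the table to chars. A standalone check (plain C, not kernel code) that the new table in the first hunk agrees with the arithmetic it replaces:

#include <assert.h>
#include <stdio.h>

/* Table from the first hunk, indexed by src_cnt - 1. */
static const char slot_count_table[] = { 1, 2, 2, 2,
                                         2, 3, 3, 3,
                                         3, 4, 4, 4,
                                         4, 5, 5, 5, };

int main(void)
{
        for (int src_cnt = 1; src_cnt <= 16; src_cnt++) {
                /* The arithmetic the table replaces: one slot for the
                 * basic descriptor plus one per 4 sources above the
                 * first (1 source = 8 bytes, 1 slot = 32 bytes),
                 * rounded up. */
                int num_slots = 1 + (((src_cnt - 1) << 3) >> 5);
                if (((src_cnt - 1) << 3) & 0x1f)
                        num_slots++;
                assert(num_slots == slot_count_table[src_cnt - 1]);
        }
        printf("table matches computed values for src_cnt 1..16\n");
        return 0;
}
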
 
diff --git a/include/asm-arm/kgdb.h b/include/asm-arm/kgdb.h
new file mode 100644 (file)
index 0000000..67af4b8
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * ARM KGDB support
+ *
+ * Author: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ *
+ */
+
+#ifndef __ARM_KGDB_H__
+#define __ARM_KGDB_H__
+
+#include <linux/ptrace.h>
+
+/*
+ * GDB assumes that we're a user process being debugged, so
+ * it will send us an SWI command to write into memory as the
+ * debug trap. When an SWI occurs, the next instruction addr is
+ * placed into R14_svc before jumping to the vector trap.
+ * This doesn't work for kernel debugging as we are already in SVC
+ * mode, so we would lose the kernel's LR, which is a bad thing.
+ *
+ * By doing this as an undefined instruction trap, we force a mode
+ * switch from SVC to UND mode, allowing us to save full kernel state.
+ *
+ * We also define a KGDB_COMPILED_BREAK which can be used to compile
+ * in breakpoints. This is important for things like sysrq-G and for
+ * the initial breakpoint from trap_init().
+ *
+ * Note to ARM HW designers: Add real trap support like SH && PPC to
+ * make our lives much much simpler. :)
+ */
+#define BREAK_INSTR_SIZE       4
+#define GDB_BREAKINST          0xef9f0001
+#define KGDB_BREAKINST         0xe7ffdefe
+#define KGDB_COMPILED_BREAK    0xe7ffdeff
+#define CACHE_FLUSH_IS_SAFE    1
+
+#ifndef        __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+       asm(".word 0xe7ffdeff");
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * From Kevin Hilman:
+ *
+ * gdb is expecting the following registers layout.
+ *
+ * r0-r15: 1 long word each
+ * f0-f7:  unused, 3 long words each !!
+ * fps:    unused, 1 long word
+ * cpsr:   1 long word
+ *
+ * Even though f0-f7 and fps are not used, they need to be
+ * present in the registers sent for correct processing in
+ * the host-side gdb.
+ *
+ * In particular, it is crucial that CPSR is in the right place,
+ * otherwise gdb will not be able to correctly interpret stepping over
+ * conditional branches.
+ */
+#define _GP_REGS               16
+#define _FP_REGS               8
+#define _EXTRA_REGS            2
+#define GDB_MAX_REGS           (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+
+#define KGDB_MAX_NO_CPUS       1
+#define BUFMAX                 400
+#define NUMREGBYTES            (GDB_MAX_REGS << 2)
+#define NUMCRITREGBYTES                (32 << 2)
+
+#define _R0                    0
+#define _R1                    1
+#define _R2                    2
+#define _R3                    3
+#define _R4                    4
+#define _R5                    5
+#define _R6                    6
+#define _R7                    7
+#define _R8                    8
+#define _R9                    9
+#define _R10                   10
+#define _FP                    11
+#define _IP                    12
+#define _SPT                   13
+#define _LR                    14
+#define _PC                    15
+#define _CPSR                  (GDB_MAX_REGS - 1)
+
+/*
+ * So that we can denote the end of a frame for tracing,
+ * in the simple case:
+ */
+#define CFI_END_FRAME(func)    __CFI_END_FRAME(_PC, _SPT, func)
+
+#endif /* __ARM_KGDB_H__ */
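
The register-layout constants above pin down the size of the packet exchanged with gdb: 16 general-purpose slots, three long words for each of the eight unused FP registers, plus fps and cpsr. A standalone check of the resulting numbers:

#include <assert.h>
#include <stdio.h>

#define _GP_REGS     16
#define _FP_REGS     8
#define _EXTRA_REGS  2
#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
#define NUMREGBYTES  (GDB_MAX_REGS << 2)

int main(void)
{
        /* 16 GP regs + 8 FP regs at 3 words each + fps + cpsr = 42 slots */
        assert(GDB_MAX_REGS == 42);
        /* each slot is one 4-byte long word on 32-bit ARM */
        assert(NUMREGBYTES == 168);
        /* cpsr is the last slot */
        printf("_CPSR index = %d\n", GDB_MAX_REGS - 1);
        return 0;
}
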
diff --git a/include/asm-arm/plat-orion/mv_xor.h b/include/asm-arm/plat-orion/mv_xor.h
new file mode 100644 (file)
index 0000000..c349e8f
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Marvell XOR platform device data definition file.
+ */
+
+#ifndef __ASM_PLAT_ORION_MV_XOR_H
+#define __ASM_PLAT_ORION_MV_XOR_H
+
+#include <linux/dmaengine.h>
+#include <linux/mbus.h>
+
+#define MV_XOR_SHARED_NAME     "mv_xor_shared"
+#define MV_XOR_NAME            "mv_xor"
+
+struct mbus_dram_target_info;
+
+struct mv_xor_platform_shared_data {
+       struct mbus_dram_target_info    *dram;
+};
+
+struct mv_xor_platform_data {
+       struct platform_device          *shared;
+       int                             hw_id;
+       dma_cap_mask_t                  cap_mask;
+       size_t                          pool_size;
+};
+
+
+#endif
index f1541afcf85c200c457fb48fc93d5c88d250902d..aa399aec568ef02533c2d6694f192f91a8446ab5 100644 (file)
@@ -24,4 +24,6 @@ static inline int in_exception_text(unsigned long ptr)
               ptr < (unsigned long)&__exception_text_end;
 }
 
+extern void __init early_trap_init(void);
+
 #endif
index 31e48b0e732414aff872d901dd4c0f33a5f5b54d..d18a3053be0d33b909ce665b41aaa62211e37a0a 100644 (file)
 #define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N))
 #define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N))
 
+
+/*
+ * DMAC peripheral hardware handshaking interfaces, used with dw_dmac
+ */
+#define DMAC_MCI_RX            0
+#define DMAC_MCI_TX            1
+#define DMAC_DAC_TX            2
+#define DMAC_AC97_A_RX         3
+#define DMAC_AC97_A_TX         4
+#define DMAC_AC97_B_RX         5
+#define DMAC_AC97_B_TX         6
+#define DMAC_DMAREQ_0          7
+#define DMAC_DMAREQ_1          8
+#define DMAC_DMAREQ_2          9
+#define DMAC_DMAREQ_3          10
+
 #endif /* __ASM_ARCH_AT32AP700X_H__ */
index b617dac82969085a2217e9ee502e05e04ac9ca11..1399caf719aefcfffc2871976b184d967173396e 100644 (file)
@@ -1,57 +1,65 @@
 /*
- * kgdb.h: Defines and declarations for serial line source level
- *         remote debugging of the Linux kernel using gdb.
+ * include/asm-powerpc/kgdb.h
  *
+ * The PowerPC (32/64) specific defines / externs for KGDB.  Based on
+ * the previous 32bit and 64bit specific files, which had the following
+ * copyrights:
+ *
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC Mods (C) 2004 Tom Rini (trini@mvista.com)
+ * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com)
  * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
  *
+ *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Author: Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
  */
 #ifdef __KERNEL__
-#ifndef _PPC_KGDB_H
-#define _PPC_KGDB_H
+#ifndef __POWERPC_KGDB_H__
+#define __POWERPC_KGDB_H__
 
 #ifndef __ASSEMBLY__
 
-/* Things specific to the gen550 backend. */
-struct uart_port;
-
-extern void gen550_progress(char *, unsigned short);
-extern void gen550_kgdb_map_scc(void);
-extern void gen550_init(int, struct uart_port *);
-
-/* Things specific to the pmac backend. */
-extern void zs_kgdb_hook(int tty_num);
-
-/* To init the kgdb engine. (called by serial hook)*/
-extern void set_debug_traps(void);
-
-/* To enter the debugger explicitly. */
-extern void breakpoint(void);
-
-/* For taking exceptions
- * these are defined in traps.c
- */
-extern int (*debugger)(struct pt_regs *regs);
-extern int (*debugger_bpt)(struct pt_regs *regs);
-extern int (*debugger_sstep)(struct pt_regs *regs);
-extern int (*debugger_iabr_match)(struct pt_regs *regs);
-extern int (*debugger_dabr_match)(struct pt_regs *regs);
-extern void (*debugger_fault_handler)(struct pt_regs *regs);
-
-/* What we bring to the party */
-int kgdb_bpt(struct pt_regs *regs);
-int kgdb_sstep(struct pt_regs *regs);
-void kgdb(struct pt_regs *regs);
-int kgdb_iabr_match(struct pt_regs *regs);
-int kgdb_dabr_match(struct pt_regs *regs);
+#define BREAK_INSTR_SIZE       4
+#define BUFMAX                 ((NUMREGBYTES * 2) + 512)
+#define OUTBUFMAX              ((NUMREGBYTES * 2) + 512)
+static inline void arch_kgdb_breakpoint(void)
+{
+       asm(".long 0x7d821008"); /* twge r2, r2 */
+}
+#define CACHE_FLUSH_IS_SAFE    1
 
+/* The number of bytes of registers we have to save depends on a few
+ * things.  For 64bit we default to not including vector registers and
+ * vector state registers. */
+#ifdef CONFIG_PPC64
 /*
- * external low-level support routines (ie macserial.c)
+ * 64 bit (8 byte) registers:
+ *   32 gpr, 32 fpr, nip, msr, link, ctr
+ * 32 bit (4 byte) registers:
+ *   ccr, xer, fpscr
  */
-extern void kgdb_interruptible(int); /* control interrupts from serial */
-extern void putDebugChar(char);   /* write a single character      */
-extern char getDebugChar(void);   /* read and return a single char */
-
+#define NUMREGBYTES            ((68 * 8) + (3 * 4))
+#define NUMCRITREGBYTES                184
+#else /* CONFIG_PPC32 */
+/* On non-E500 family PPC32 we determine the size by picking the last
+ * register we need, but on E500 we skip sections so we list what we
+ * need to store, and add it up. */
+#ifndef CONFIG_E500
+#define MAXREG                 (PT_FPSCR+1)
+#else
+/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
+#define MAXREG                 ((32*2)+6+2+1)
+#endif
+#define NUMREGBYTES            (MAXREG * sizeof(int))
+/* CR/LR, R1, R2, R13-R31 inclusive. */
+#define NUMCRITREGBYTES                (23 * sizeof(int))
+#endif /* 32/64 */
 #endif /* !(__ASSEMBLY__) */
-#endif /* !(_PPC_KGDB_H) */
+#endif /* !__POWERPC_KGDB_H__ */
 #endif /* __KERNEL__ */
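
The NUMREGBYTES arithmetic above is easy to mis-read, so here are the numbers worked out as standalone asserts (plain C; the E500 figure is MAXREG, which still gets multiplied by sizeof(int)):

#include <assert.h>

int main(void)
{
        /* PPC64: 32 gpr + 32 fpr + nip + msr + link + ctr = 68 8-byte
         * regs, plus ccr, xer, fpscr as 4-byte regs. */
        assert((68 * 8) + (3 * 4) == 556);      /* NUMREGBYTES on ppc64 */

        /* E500 PPC32: 32 GPRs counted as 2 ints each, plus nip, msr,
         * ccr, link, ctr, xer, a 2-int acc, and spefscr. */
        assert((32 * 2) + 6 + 2 + 1 == 73);     /* MAXREG; x4 = 292 bytes */
        return 0;
}
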
index eb640f0acfacd629ee179c4b10e639d56a04b93a..0f50d4cc4360e65d3453704aa400448f99cb95a6 100644 (file)
@@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 
 /**
  * async_tx_sync_epilog - actions to take if an operation is run synchronously
- * @flags: async_tx flags
- * @depend_tx: transaction depends on depend_tx
  * @cb_fn: function to call when the transaction completes
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(unsigned long flags,
-       struct dma_async_tx_descriptor *depend_tx,
-       dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
 {
        if (cb_fn)
                cb_fn(cb_fn_param);
-
-       if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
-               async_tx_ack(depend_tx);
 }
 
 void
@@ -152,4 +145,6 @@ struct dma_async_tx_descriptor *
 async_trigger_callback(enum async_tx_flags flags,
        struct dma_async_tx_descriptor *depend_tx,
        dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
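
The header also gains async_tx_quiesce(), used by the reworked async_memcpy/memset/xor paths in this merge to retire a dependency descriptor before falling back to a synchronous operation. A plausible sketch of its semantics, assuming the usual dmaengine helpers; not necessarily the exact body that landed in crypto/async_tx/async_tx.c:

/* Wait out a pending descriptor, then ack it and drop the caller's
 * reference.  A hedged sketch, not the authoritative implementation. */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
        if (*tx) {
                /* a descriptor we still depend on must not be acked yet */
                BUG_ON(async_tx_test_ack(*tx));
                if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
                        panic("DMA_ERROR waiting for transaction\n");
                async_tx_ack(*tx);
                *tx = NULL;
        }
}
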
index af61cd1f37e96c1e019fd812c45a938de03c3944..b00a753eda53e6af1e50e7d892defdcb402913d8 100644 (file)
@@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb);
 #define DCA_PROVIDER_REMOVE  0x0002
 
 struct dca_provider {
+       struct list_head        node;
        struct dca_ops          *ops;
        struct device           *cd;
        int                      id;
@@ -18,7 +19,9 @@ struct dca_provider {
 struct dca_ops {
        int     (*add_requester)    (struct dca_provider *, struct device *);
        int     (*remove_requester) (struct dca_provider *, struct device *);
-       u8      (*get_tag)          (struct dca_provider *, int cpu);
+       u8      (*get_tag)          (struct dca_provider *, struct device *,
+                                    int cpu);
+       int     (*dev_managed)      (struct dca_provider *, struct device *);
 };
 
 struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
@@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca)
 }
 
 /* Requester API */
+#define DCA_GET_TAG_TWO_ARGS
 int dca_add_requester(struct device *dev);
 int dca_remove_requester(struct device *dev);
 u8 dca_get_tag(int cpu);
+u8 dca3_get_tag(struct device *dev, int cpu);
 
 /* internal stuff */
 int __init dca_sysfs_init(void);
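
With the extra struct device argument, a provider can hand out per-device tags and refuse devices it does not manage (dev_managed). On the requester side the new entry point is dca3_get_tag(); a one-line hedged sketch, where dev is the driver's own struct device:

/* Hedged sketch: DCA3-style tag lookup now needs the requesting
 * device as well as the cpu. */
u8 tag = dca3_get_tag(dev, raw_smp_processor_id());
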
index d08a5c5eb928f3f2d8ef8639a518b0ac37fb6c89..adb0b084eb5a413c601d4a8489e006e061906840 100644 (file)
@@ -89,10 +89,23 @@ enum dma_transaction_type {
        DMA_MEMSET,
        DMA_MEMCPY_CRC32C,
        DMA_INTERRUPT,
+       DMA_SLAVE,
 };
 
 /* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
+#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
+
+/**
+ * enum dma_slave_width - DMA slave register access width.
+ * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+ */
+enum dma_slave_width {
+       DMA_SLAVE_WIDTH_8BIT,
+       DMA_SLAVE_WIDTH_16BIT,
+       DMA_SLAVE_WIDTH_32BIT,
+};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -102,10 +115,14 @@ enum dma_transaction_type {
  * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 *     acknowledges receipt, i.e. has had a chance to establish any
  *     dependency chains
+ * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
+ * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
  */
 enum dma_ctrl_flags {
        DMA_PREP_INTERRUPT = (1 << 0),
        DMA_CTRL_ACK = (1 << 1),
+       DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
+       DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 };
 
 /**
@@ -114,6 +131,32 @@ enum dma_ctrl_flags {
  */
 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
+/**
+ * struct dma_slave - Information about a DMA slave
+ * @dev: device acting as DMA slave
+ * @dma_dev: required DMA master device. If non-NULL, the client cannot be
+ *     bound to any master other than this one.
+ * @tx_reg: physical address of data register used for
+ *     memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ *     peripheral-to-memory transfers
+ * @reg_width: peripheral register width
+ *
+ * If dma_dev is non-NULL, the client cannot be bound to any DMA
+ * master other than the one corresponding to this device. The DMA
+ * master driver may use this to determine if there is
+ * controller-specific data wrapped around this struct. Drivers or
+ * platform code that set the dma_dev field must therefore make sure
+ * to use an appropriate controller-specific dma slave structure
+ * wrapping this struct.
+ */
+struct dma_slave {
+       struct device           *dev;
+       struct device           *dma_dev;
+       dma_addr_t              tx_reg;
+       dma_addr_t              rx_reg;
+       enum dma_slave_width    reg_width;
+};
+
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
@@ -139,6 +182,7 @@ struct dma_chan_percpu {
  * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
+ * @client_count: how many clients are using this channel
  */
 struct dma_chan {
        struct dma_device *device;
@@ -154,6 +198,7 @@ struct dma_chan {
 
        struct list_head device_node;
        struct dma_chan_percpu *local;
+       int client_count;
 };
 
 #define to_dma_chan(p) container_of(p, struct dma_chan, dev)
@@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
  * @event_callback: func ptr to call when something happens
  * @cap_mask: only return channels that satisfy the requested capabilities
  *  a value of zero corresponds to any capability
+ * @slave: data for preparing slave transfer. Must be non-NULL iff the
+ *  DMA_SLAVE capability is requested.
  * @global_node: list_head for global dma_client_list
  */
 struct dma_client {
        dma_event_callback      event_callback;
        dma_cap_mask_t          cap_mask;
+       struct dma_slave        *slave;
        struct list_head        global_node;
 };
 
@@ -263,6 +311,8 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_zero_sum: prepares a zero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_terminate_all: terminate all pending operations
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -279,7 +329,8 @@ struct dma_device {
        int dev_id;
        struct device *dev;
 
-       int (*device_alloc_chan_resources)(struct dma_chan *chan);
+       int (*device_alloc_chan_resources)(struct dma_chan *chan,
+                       struct dma_client *client);
        void (*device_free_chan_resources)(struct dma_chan *chan);
 
        struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -297,6 +348,12 @@ struct dma_device {
        struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
                struct dma_chan *chan, unsigned long flags);
 
+       struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned long flags);
+       void (*device_terminate_all)(struct dma_chan *chan);
+
        enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
                        dma_cookie_t cookie, dma_cookie_t *last,
                        dma_cookie_t *used);
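
device_prep_slave_sg() and device_terminate_all() round out the new slave API. A hedged sketch of the client side, assuming the caller already owns a DMA_SLAVE-capable channel and an sg list describing the memory half of the transfer; a real caller would also keep the submit cookie for device_is_tx_complete():

/* Sketch: issue one memory-to-peripheral slave transfer. */
static int issue_tx(struct dma_chan *chan, struct scatterlist *sgl,
                    unsigned int sg_len)
{
        struct dma_async_tx_descriptor *tx;

        tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
                return -ENOMEM;         /* descriptor pool exhausted */

        tx->tx_submit(tx);                        /* queue the descriptor */
        chan->device->device_issue_pending(chan); /* kick the hardware */
        return 0;
}
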
@@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan);
 
-static inline void
-async_tx_ack(struct dma_async_tx_descriptor *tx)
+static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
        tx->flags |= DMA_CTRL_ACK;
 }
 
-static inline int
-async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
 {
-       return tx->flags & DMA_CTRL_ACK;
+       return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
 }
 
 #define first_dma_cap(mask) __first_dma_cap(&(mask))
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
new file mode 100644 (file)
index 0000000..04d217b
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
+ * AVR32 systems).
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DW_DMAC_H
+#define DW_DMAC_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ */
+struct dw_dma_platform_data {
+       unsigned int    nr_channels;
+};
+
+/**
+ * struct dw_dma_slave - Controller-specific information about a slave
+ * @slave: Generic information about the slave
+ * @ctl_lo: Platform-specific initializer for the CTL_LO register
+ * @cfg_hi: Platform-specific initializer for the CFG_HI register
+ * @cfg_lo: Platform-specific initializer for the CFG_LO register
+ */
+struct dw_dma_slave {
+       struct dma_slave        slave;
+       u32                     cfg_hi;
+       u32                     cfg_lo;
+};
+
+/* Platform-configurable bits in CFG_HI */
+#define DWC_CFGH_FCMODE                (1 << 0)
+#define DWC_CFGH_FIFO_MODE     (1 << 1)
+#define DWC_CFGH_PROTCTL(x)    ((x) << 2)
+#define DWC_CFGH_SRC_PER(x)    ((x) << 7)
+#define DWC_CFGH_DST_PER(x)    ((x) << 11)
+
+/* Platform-configurable bits in CFG_LO */
+#define DWC_CFGL_PRIO(x)       ((x) << 5)      /* priority */
+#define DWC_CFGL_LOCK_CH_XFER  (0 << 12)       /* scope of LOCK_CH */
+#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
+#define DWC_CFGL_LOCK_CH_XACT  (2 << 12)
+#define DWC_CFGL_LOCK_BUS_XFER (0 << 14)       /* scope of LOCK_BUS */
+#define DWC_CFGL_LOCK_BUS_BLOCK        (1 << 14)
+#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
+#define DWC_CFGL_LOCK_CH       (1 << 15)       /* channel lockout */
+#define DWC_CFGL_LOCK_BUS      (1 << 16)       /* busmaster lockout */
+#define DWC_CFGL_HS_DST_POL    (1 << 18)       /* dst handshake active low */
+#define DWC_CFGL_HS_SRC_POL    (1 << 19)       /* src handshake active low */
+
+static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
+{
+       return container_of(slave, struct dw_dma_slave, slave);
+}
+
+#endif /* DW_DMAC_H */
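
The embedded struct dma_slave is what generic clients see; the wrapper carries the controller-specific CFG values, and the driver recovers the wrapper with to_dw_dma_slave(). A hedged sketch of both halves (MMC_BASE/MMC_TDR are placeholder register addresses; DMAC_MCI_TX is the handshake ID defined for at32ap700x earlier in this diff):

/* Platform side: describe an MMC controller's TX handshake. */
static struct dw_dma_slave mci_dma_slave = {
        .slave = {
                .tx_reg    = MMC_BASE + MMC_TDR, /* placeholder address */
                .reg_width = DMA_SLAVE_WIDTH_32BIT,
        },
        .cfg_hi = DWC_CFGH_DST_PER(DMAC_MCI_TX),
        .cfg_lo = 0,
};

/* Driver side: recover the wrapper from the generic pointer. */
static void configure(struct dma_slave *slave)
{
        struct dw_dma_slave *dws = to_dw_dma_slave(slave);
        /* ...program CFG_HI/CFG_LO from dws->cfg_hi / dws->cfg_lo... */
}
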
index fe56b86f2c67cc782eb8849324e889805cd9f849..ac4e678a04edb497f9194b7eb4939c88bf8bec10 100644 (file)
@@ -512,7 +512,7 @@ struct hid_descriptor {
 
 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */
 /* We ignore a few input applications that are not widely used */
-#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001))
+#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
 
 /* HID core API */
 
index 4726126f5a59875a831084062e4430ddf73b79a3..d67ccca2b96472b229ffd264642ff420aae66cba 100644 (file)
@@ -178,6 +178,7 @@ typedef struct hw_regs_s {
        ide_ack_intr_t  *ack_intr;              /* acknowledge interrupt */
        hwif_chipset_t  chipset;
        struct device   *dev, *parent;
+       unsigned long   config;
 } hw_regs_t;
 
 void ide_init_port_data(struct hwif_s *, unsigned int);
@@ -307,7 +308,65 @@ struct ide_acpi_drive_link;
 struct ide_acpi_hwif_link;
 #endif
 
-typedef struct ide_drive_s {
+/* ATAPI device flags */
+enum {
+       IDE_AFLAG_DRQ_INTERRUPT         = (1 << 0),
+       IDE_AFLAG_MEDIA_CHANGED         = (1 << 1),
+
+       /* ide-cd */
+       /* Drive cannot lock the door. */
+       IDE_AFLAG_NO_DOORLOCK           = (1 << 2),
+       /* Drive cannot eject the disc. */
+       IDE_AFLAG_NO_EJECT              = (1 << 3),
+       /* Drive is a pre ATAPI 1.2 drive. */
+       IDE_AFLAG_PRE_ATAPI12           = (1 << 4),
+       /* TOC addresses are in BCD. */
+       IDE_AFLAG_TOCADDR_AS_BCD        = (1 << 5),
+       /* TOC track numbers are in BCD. */
+       IDE_AFLAG_TOCTRACKS_AS_BCD      = (1 << 6),
+       /*
+        * Drive does not provide data in multiples of SECTOR_SIZE
+        * when more than one interrupt is needed.
+        */
+       IDE_AFLAG_LIMIT_NFRAMES         = (1 << 7),
+       /* Seeking in progress. */
+       IDE_AFLAG_SEEKING               = (1 << 8),
+       /* Saved TOC information is current. */
+       IDE_AFLAG_TOC_VALID             = (1 << 9),
+       /* We think that the drive door is locked. */
+       IDE_AFLAG_DOOR_LOCKED           = (1 << 10),
+       /* SET_CD_SPEED command is unsupported. */
+       IDE_AFLAG_NO_SPEED_SELECT       = (1 << 11),
+       IDE_AFLAG_VERTOS_300_SSD        = (1 << 12),
+       IDE_AFLAG_VERTOS_600_ESD        = (1 << 13),
+       IDE_AFLAG_SANYO_3CD             = (1 << 14),
+       IDE_AFLAG_FULL_CAPS_PAGE        = (1 << 15),
+       IDE_AFLAG_PLAY_AUDIO_OK         = (1 << 16),
+       IDE_AFLAG_LE_SPEED_FIELDS       = (1 << 17),
+
+       /* ide-floppy */
+       /* Format in progress */
+       IDE_AFLAG_FORMAT_IN_PROGRESS    = (1 << 18),
+       /* Avoid commands not supported in Clik drive */
+       IDE_AFLAG_CLIK_DRIVE            = (1 << 19),
+       /* Requires BH algorithm for packets */
+       IDE_AFLAG_ZIP_DRIVE             = (1 << 20),
+
+       /* ide-tape */
+       IDE_AFLAG_IGNORE_DSC            = (1 << 21),
+       /* 0 when the tape position is unknown */
+       IDE_AFLAG_ADDRESS_VALID         = (1 << 22),
+       /* Device already opened */
+       IDE_AFLAG_BUSY                  = (1 << 23),
+       /* Attempt to auto-detect the current user block size */
+       IDE_AFLAG_DETECT_BS             = (1 << 24),
+       /* Currently on a filemark */
+       IDE_AFLAG_FILEMARK              = (1 << 25),
+       /* 0 = no tape is loaded, so we don't rewind after ejecting */
+       IDE_AFLAG_MEDIUM_PRESENT        = (1 << 26)
+};
+
+struct ide_drive_s {
        char            name[4];        /* drive name, such as "hda" */
         char            driver_req[10];        /* requests specific driver */
 
@@ -355,7 +414,6 @@ typedef struct ide_drive_s {
        unsigned nodma          : 1;    /* disallow DMA */
        unsigned remap_0_to_1   : 1;    /* 0=noremap, 1=remap 0->1 (for EZDrive) */
        unsigned blocked        : 1;    /* 1=power management told us not to do anything, so sleep nicely */
-       unsigned vdma           : 1;    /* 1=doing PIO over DMA 0=doing normal DMA */
        unsigned scsi           : 1;    /* 0=default, 1=ide-scsi emulation */
        unsigned sleeping       : 1;    /* 1=sleeping & sleep field valid */
        unsigned post_reset     : 1;
@@ -400,7 +458,14 @@ typedef struct ide_drive_s {
        struct list_head list;
        struct device   gendev;
        struct completion gendev_rel_comp;      /* to deal with device release() */
-} ide_drive_t;
+
+       /* callback for packet commands */
+       void (*pc_callback)(struct ide_drive_s *);
+
+       unsigned long atapi_flags;
+};
+
+typedef struct ide_drive_s ide_drive_t;
 
 #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev)
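
These IDE_AFLAG_* values replace the per-driver flag words (the ide-scsi hunk near the top of this diff shows the conversion). Note they are defined as masks, yet idescsi_setup() passes them to set_bit() as bit numbers; that stays coherent only while every user of a given flag sticks to one convention. The mask style reads most naturally:

/* Sketch of the mask-style usage the enum encourages: one flags
 * word on the drive, shared by ide-cd/ide-floppy/ide-tape/ide-scsi. */
if (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) {
        drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
        /* ...revalidate the medium... */
}
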
 
@@ -408,8 +473,28 @@ typedef struct ide_drive_s {
     ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
 #define IDE_CHIPSET_IS_PCI(c)  ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
 
+struct ide_task_s;
 struct ide_port_info;
 
+struct ide_tp_ops {
+       void    (*exec_command)(struct hwif_s *, u8);
+       u8      (*read_status)(struct hwif_s *);
+       u8      (*read_altstatus)(struct hwif_s *);
+       u8      (*read_sff_dma_status)(struct hwif_s *);
+
+       void    (*set_irq)(struct hwif_s *, int);
+
+       void    (*tf_load)(ide_drive_t *, struct ide_task_s *);
+       void    (*tf_read)(ide_drive_t *, struct ide_task_s *);
+
+       void    (*input_data)(ide_drive_t *, struct request *, void *,
+                             unsigned int);
+       void    (*output_data)(ide_drive_t *, struct request *, void *,
+                              unsigned int);
+};
+
+extern const struct ide_tp_ops default_tp_ops;
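
default_tp_ops bundles the stock taskfile-protocol accessors that every hwif used to carry as individual function pointers (and as the OUTB/OUTBSYNC/INB hooks removed further down). A hedged sketch of the shape of the default implementations; the real bodies live in ide-iops.c and also handle the MMIO case:

/* Hedged sketch of the default port-I/O accessors. */
void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
{
        outb(cmd, hwif->io_ports.command_addr);
}

u8 ide_read_status(ide_hwif_t *hwif)
{
        return inb(hwif->io_ports.status_addr);
}

const struct ide_tp_ops default_tp_ops = {
        .exec_command   = ide_exec_command,
        .read_status    = ide_read_status,
        /* ...remaining hooks filled in the same way... */
};
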
+
 struct ide_port_ops {
        /* host specific initialization of a device */
        void    (*init_dev)(ide_drive_t *);
@@ -447,8 +532,6 @@ struct ide_dma_ops {
        void    (*dma_timeout)(struct ide_drive_s *);
 };
 
-struct ide_task_s;
-
 typedef struct hwif_s {
        struct hwif_s *next;            /* for linked-list in ide_hwgroup_t */
        struct hwif_s *mate;            /* other hwif from same PCI chip */
@@ -486,22 +569,12 @@ typedef struct hwif_s {
 
        void (*rw_disk)(ide_drive_t *, struct request *);
 
+       const struct ide_tp_ops         *tp_ops;
        const struct ide_port_ops       *port_ops;
        const struct ide_dma_ops        *dma_ops;
 
-       void (*tf_load)(ide_drive_t *, struct ide_task_s *);
-       void (*tf_read)(ide_drive_t *, struct ide_task_s *);
-
-       void (*input_data)(ide_drive_t *, struct request *, void *, unsigned);
-       void (*output_data)(ide_drive_t *, struct request *, void *, unsigned);
-
        void (*ide_dma_clear_irq)(ide_drive_t *drive);
 
-       void (*OUTB)(u8 addr, unsigned long port);
-       void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port);
-
-       u8  (*INB)(unsigned long port);
-
        /* dma physical region descriptor table (cpu view) */
        unsigned int    *dmatable_cpu;
        /* dma physical region descriptor table (dma view) */
@@ -524,8 +597,6 @@ typedef struct hwif_s {
        int             irq;            /* our irq number */
 
        unsigned long   dma_base;       /* base addr for dma ports */
-       unsigned long   dma_command;    /* dma command register */
-       unsigned long   dma_status;     /* dma status register */
 
        unsigned long   config_data;    /* for use by chipset-specific code */
        unsigned long   select_data;    /* for use by chipset-specific code */
@@ -552,6 +623,11 @@ typedef struct hwif_s {
 #endif
 } ____cacheline_internodealigned_in_smp ide_hwif_t;
 
+struct ide_host {
+       ide_hwif_t      *ports[MAX_HWIFS];
+       unsigned int    n_ports;
+};
+
 /*
  *  internal ide interrupt handler type
  */
@@ -611,8 +687,6 @@ enum {
        PC_FLAG_WRITING                 = (1 << 6),
        /* command timed out */
        PC_FLAG_TIMEDOUT                = (1 << 7),
-       PC_FLAG_ZIP_DRIVE               = (1 << 8),
-       PC_FLAG_DRQ_INTERRUPT           = (1 << 9),
 };
 
 struct ide_atapi_pc {
@@ -646,8 +720,6 @@ struct ide_atapi_pc {
         */
        u8 pc_buf[256];
 
-       void (*callback)(ide_drive_t *);
-
        /* idetape only */
        struct idetape_bh *bh;
        char *b_data;
@@ -807,13 +879,6 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
 
-ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
-
-static inline ide_hwif_t *ide_find_port(void)
-{
-       return ide_find_port_slot(NULL);
-}
-
 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
                             int uptodate, int nr_sectors);
@@ -884,6 +949,7 @@ enum {
        IDE_TFLAG_IN_HOB                = IDE_TFLAG_IN_HOB_FEATURE |
                                          IDE_TFLAG_IN_HOB_NSECT |
                                          IDE_TFLAG_IN_HOB_LBA,
+       IDE_TFLAG_IN_FEATURE            = (1 << 1),
        IDE_TFLAG_IN_NSECT              = (1 << 25),
        IDE_TFLAG_IN_LBAL               = (1 << 26),
        IDE_TFLAG_IN_LBAM               = (1 << 27),
@@ -948,9 +1014,25 @@ typedef struct ide_task_s {
 
 void ide_tf_dump(const char *, struct ide_taskfile *);
 
+void ide_exec_command(ide_hwif_t *, u8);
+u8 ide_read_status(ide_hwif_t *);
+u8 ide_read_altstatus(ide_hwif_t *);
+u8 ide_read_sff_dma_status(ide_hwif_t *);
+
+void ide_set_irq(ide_hwif_t *, int);
+
+void ide_tf_load(ide_drive_t *, ide_task_t *);
+void ide_tf_read(ide_drive_t *, ide_task_t *);
+
+void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
+void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
+
 extern void SELECT_DRIVE(ide_drive_t *);
 void SELECT_MASK(ide_drive_t *, int);
 
+u8 ide_read_error(ide_drive_t *);
+void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
+
 extern int drive_is_ready(ide_drive_t *);
 
 void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
@@ -1000,12 +1082,15 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o
 #define ide_pci_register_driver(d) pci_register_driver(d)
 #endif
 
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *);
+void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
+                        hw_regs_t *, hw_regs_t **);
 void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
 int ide_pci_set_master(struct pci_dev *, const char *);
 unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
+extern const struct ide_dma_ops sff_dma_ops;
+int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
 int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
 #else
 static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
@@ -1015,10 +1100,6 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
 }
 #endif
 
-extern void default_hwif_iops(ide_hwif_t *);
-extern void default_hwif_mmiops(ide_hwif_t *);
-extern void default_hwif_transport(ide_hwif_t *);
-
 typedef struct ide_pci_enablebit_s {
        u8      reg;    /* byte pci reg holding the enable-bit */
        u8      mask;   /* mask to isolate the enable-bit */
@@ -1081,7 +1162,6 @@ enum {
        IDE_HFLAG_IO_32BIT              = (1 << 24),
        /* unmask IRQs */
        IDE_HFLAG_UNMASK_IRQS           = (1 << 25),
-       IDE_HFLAG_ABUSE_SET_DMA_MODE    = (1 << 26),
        /* serialize ports if DMA is possible (for sl82c105) */
        IDE_HFLAG_SERIALIZE_DMA         = (1 << 27),
        /* force host out of "simplex" mode */
@@ -1092,8 +1172,6 @@ enum {
        IDE_HFLAG_NO_IO_32BIT           = (1 << 30),
        /* never unmask IRQs */
        IDE_HFLAG_NO_UNMASK_IRQS        = (1 << 31),
-       /* host uses VDMA (disabled for now) */
-       IDE_HFLAG_VDMA                  = 0,
 };
 
 #ifdef CONFIG_BLK_DEV_OFFBOARD
@@ -1110,6 +1188,7 @@ struct ide_port_info {
        int                     (*init_dma)(ide_hwif_t *,
                                            const struct ide_port_info *);
 
+       const struct ide_tp_ops         *tp_ops;
        const struct ide_port_ops       *port_ops;
        const struct ide_dma_ops        *dma_ops;
 
@@ -1163,7 +1242,6 @@ void ide_destroy_dmatable(ide_drive_t *);
 extern int ide_build_dmatable(ide_drive_t *, struct request *);
 int ide_allocate_dma_engine(ide_hwif_t *);
 void ide_release_dma_engine(ide_hwif_t *);
-void ide_setup_dma(ide_hwif_t *, unsigned long);
 
 void ide_dma_host_set(ide_drive_t *, int);
 extern int ide_dma_setup(ide_drive_t *);
@@ -1217,8 +1295,14 @@ void ide_undecoded_slave(ide_drive_t *);
 
 void ide_port_apply_params(ide_hwif_t *);
 
-int ide_device_add_all(u8 *idx, const struct ide_port_info *);
-int ide_device_add(u8 idx[4], const struct ide_port_info *);
+struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **);
+struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **);
+void ide_host_free(struct ide_host *);
+int ide_host_register(struct ide_host *, const struct ide_port_info *,
+                     hw_regs_t **);
+int ide_host_add(const struct ide_port_info *, hw_regs_t **,
+                struct ide_host **);
+void ide_host_remove(struct ide_host *);
 int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
 void ide_port_unregister_devices(ide_hwif_t *);
 void ide_port_scan(ide_hwif_t *);
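
The ide_device_add()/idx[4] scheme gives way to an explicit ide_host object. A hedged sketch of the new flow from a host driver's probe path (my_port_info is the driver's own struct ide_port_info; filling hw with the port's I/O addresses is elided):

/* Sketch: one-shot allocate + register via the new host API. */
hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
struct ide_host *host;
int rc;

/* ...fill hw.io_ports, hw.irq, etc. first... */
rc = ide_host_add(&my_port_info, hws, &host);
if (rc)
        return rc;

/* ...and on teardown: */
ide_host_remove(host);
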
@@ -1350,33 +1434,4 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
 
        return &hwif->drives[(drive->dn ^ 1) & 1];
 }
-
-static inline void ide_set_irq(ide_drive_t *drive, int on)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
-                      hwif->io_ports.ctl_addr);
-}
-
-static inline u8 ide_read_status(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       return hwif->INB(hwif->io_ports.status_addr);
-}
-
-static inline u8 ide_read_altstatus(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       return hwif->INB(hwif->io_ports.ctl_addr);
-}
-
-static inline u8 ide_read_error(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       return hwif->INB(hwif->io_ports.error_addr);
-}
 #endif /* _IDE_H */
index d8507eb394cf6f50e4c622b53d13e0c417b6467c..119ae7b8f028cb9795fddb16a28abc7530294764 100644 (file)
 #define PCI_DEVICE_ID_INTEL_ICH9_7     0x2916
 #define PCI_DEVICE_ID_INTEL_ICH9_8     0x2918
 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG4  0x3429
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG5  0x342a
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG6  0x342b
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG7  0x342c
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG0  0x3430
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG1  0x3431
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG2  0x3432
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG3  0x3433
 #define PCI_DEVICE_ID_INTEL_82830_HB   0x3575
 #define PCI_DEVICE_ID_INTEL_82830_CGC  0x3577
 #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
index 77a51be360103c98c8ce3a165668ea423f69d119..3cfc0fefb5ee671e1565862f1b54ee431af4eb8a 100644 (file)
@@ -217,6 +217,17 @@ void enable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(enable_irq);
 
+int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+       struct irq_desc *desc = irq_desc + irq;
+       int ret = -ENXIO;
+
+       if (desc->chip->set_wake)
+               ret = desc->chip->set_wake(irq, on);
+
+       return ret;
+}
+
 /**
  *     set_irq_wake - control irq power management wakeup
  *     @irq:   interrupt to control
@@ -233,30 +244,34 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 {
        struct irq_desc *desc = irq_desc + irq;
        unsigned long flags;
-       int ret = -ENXIO;
-       int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
+       int ret = 0;
 
        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        spin_lock_irqsave(&desc->lock, flags);
        if (on) {
-               if (desc->wake_depth++ == 0)
-                       desc->status |= IRQ_WAKEUP;
-               else
-                       set_wake = NULL;
+               if (desc->wake_depth++ == 0) {
+                       ret = set_irq_wake_real(irq, on);
+                       if (ret)
+                               desc->wake_depth = 0;
+                       else
+                               desc->status |= IRQ_WAKEUP;
+               }
        } else {
                if (desc->wake_depth == 0) {
                        printk(KERN_WARNING "Unbalanced IRQ %d "
                                        "wake disable\n", irq);
                        WARN_ON(1);
-               } else if (--desc->wake_depth == 0)
-                       desc->status &= ~IRQ_WAKEUP;
-               else
-                       set_wake = NULL;
+               } else if (--desc->wake_depth == 0) {
+                       ret = set_irq_wake_real(irq, on);
+                       if (ret)
+                               desc->wake_depth = 1;
+                       else
+                               desc->status &= ~IRQ_WAKEUP;
+               }
        }
-       if (set_wake)
-               ret = desc->chip->set_wake(irq, on);
+
        spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
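
The rework calls the chip's set_wake hook only on the 0->1 and 1->0 edges of wake_depth, and rolls the depth back if the hardware refuses, so a failed enable does not leave a phantom reference behind. A standalone model of that refcount-with-rollback logic (plain C; hw_set_wake() stands in for desc->chip->set_wake()):

#include <errno.h>
#include <stdio.h>

static unsigned wake_depth;
static int hw_should_fail;

static int hw_set_wake(int on)
{
        (void)on;
        return hw_should_fail ? -ENXIO : 0;
}

static int model_set_irq_wake(int on)
{
        int ret = 0;

        if (on) {
                if (wake_depth++ == 0) {        /* 0 -> 1 edge only */
                        ret = hw_set_wake(1);
                        if (ret)
                                wake_depth = 0; /* roll back on failure */
                }
        } else {
                if (wake_depth == 0) {
                        fprintf(stderr, "unbalanced wake disable\n");
                } else if (--wake_depth == 0) { /* 1 -> 0 edge only */
                        ret = hw_set_wake(0);
                        if (ret)
                                wake_depth = 1; /* roll back on failure */
                }
        }
        return ret;
}

int main(void)
{
        model_set_irq_wake(1);          /* edge: hardware armed */
        model_set_irq_wake(1);          /* nested: no hardware call */
        model_set_irq_wake(0);          /* still held by first user */
        model_set_irq_wake(0);          /* edge: hardware disarmed */
        printf("depth=%u\n", wake_depth);       /* depth=0 */
        return 0;
}
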
index a5d4b1dac2a574ed09870e2ce25b2e300f07a1d5..2cfd2721f7ed17f0e1e199082616c861bdd3c63e 100644 (file)
@@ -1,7 +1,4 @@
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-       bool
-
 config HAVE_ARCH_KGDB
        bool
 
index c77aff9c6eb3cc76fab911cdca097cc5add01d8c..8c6b706963ff01b02c87a54f2c70103e7f7f8f29 100644 (file)
@@ -34,6 +34,7 @@
 #define NET_DMA_DEFAULT_COPYBREAK 4096
 
 int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
+EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
 
 /**
  *     dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.