2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
36 * Things not implemented:
37 * . DMA error recovery
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionalities
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
68 * Leon van Stuivenberg <leonvs@iae.nl>
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
128 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129 #define OHCI1394_DEBUG
130 #endif
136 #ifdef OHCI1394_DEBUG
137 #define DBGMSG(fmt, args...) \
138 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
139 #else
140 #define DBGMSG(fmt, args...)
141 #endif
143 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144 #define OHCI_DMA_ALLOC(fmt, args...) \
145 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146 ++global_outstanding_dmas, ## args)
147 #define OHCI_DMA_FREE(fmt, args...) \
148 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 --global_outstanding_dmas, ## args)
150 static int global_outstanding_dmas = 0;
151 #else
152 #define OHCI_DMA_ALLOC(fmt, args...)
153 #define OHCI_DMA_FREE(fmt, args...)
154 #endif
156 /* print general (card independent) information */
157 #define PRINT_G(level, fmt, args...) \
158 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
160 /* print card specific information */
161 #define PRINT(level, fmt, args...) \
162 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
164 /* Module Parameters */
165 static int phys_dma = 1;
166 module_param(phys_dma, int, 0644);
167 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
169 static void dma_trm_tasklet(unsigned long data);
170 static void dma_trm_reset(struct dma_trm_ctx *d);
172 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
173 enum context_type type, int ctx, int num_desc,
174 int buf_size, int split_buf_size, int context_base);
175 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
176 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
178 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
179 enum context_type type, int ctx, int num_desc,
182 static void ohci1394_pci_remove(struct pci_dev *pdev);
184 #ifndef __LITTLE_ENDIAN
185 static unsigned hdr_sizes[] =
187 3, /* TCODE_WRITEQ */
188 4, /* TCODE_WRITEB */
189 3, /* TCODE_WRITE_RESPONSE */
190 0, /* reserved */
191 3, /* TCODE_READQ */
192 4, /* TCODE_READB */
193 3, /* TCODE_READQ_RESPONSE */
194 4, /* TCODE_READB_RESPONSE */
195 1, /* TCODE_CYCLE_START (???) */
196 4, /* TCODE_LOCK_REQUEST */
197 2, /* TCODE_ISO_DATA */
198 4, /* TCODE_LOCK_RESPONSE */
202 static inline void packet_swab(quadlet_t *data, int tcode)
204 size_t size = hdr_sizes[tcode];
206 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
207 return;
209 while (size--)
210 data[size] = swab32(data[size]);
212 #else
213 /* No byte swapping is needed on a little-endian host */
214 #define packet_swab(w,x)
215 #endif /* !LITTLE_ENDIAN */
217 /***********************************
218 * IEEE-1394 functionality section *
219 ***********************************/
221 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
227 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
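/* PhyControl layout used below, per the OHCI-1394 spec: regAddr sits in
 * bits 8-11, wrData in bits 0-7, wrReg is bit 14 (0x00004000), rdReg is
 * bit 15 (0x00008000), rdDone is bit 31, and the value read back arrives
 * in rdData, bits 16-23. */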
229 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
231 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
232 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
238 r = reg_read(ohci, OHCI1394_PhyControl);
240 if (i >= OHCI_LOOP_COUNT)
241 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
242 r, r & 0x80000000, i);
244 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
246 return (r & 0x00ff0000) >> 16;
249 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
255 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
257 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
259 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
260 r = reg_read(ohci, OHCI1394_PhyControl);
261 if (!(r & 0x00004000))
267 if (i == OHCI_LOOP_COUNT)
268 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
269 r, r & 0x00004000, i);
271 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
276 /* ORs the given bits into the current register value */
277 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 old = get_phy_reg (ohci, addr);
282 old |= data;
283 set_phy_reg (ohci, addr, old);
288 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
289 int phyid, int isroot)
291 quadlet_t *q = ohci->selfid_buf_cpu;
292 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
296 /* Check status of self-id reception */
298 if (ohci->selfid_swap)
299 q0 = le32_to_cpu(q[0]);
303 if ((self_id_count & 0x80000000) ||
304 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
306 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
307 self_id_count, q0, ohci->self_id_errors);
309 /* Tip by James Goodwin <jamesg@Filanet.com>:
310 * We had an error, generate another bus reset in response. */
311 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
312 set_phy_reg_mask (ohci, 1, 0x40);
313 ohci->self_id_errors++;
316 "Too many errors on SelfID error reception, giving up!");
321 /* SelfID Ok, reset error counter. */
322 ohci->self_id_errors = 0;
324 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
328 if (ohci->selfid_swap) {
329 q0 = le32_to_cpu(q[0]);
330 q1 = le32_to_cpu(q[1]);
337 DBGMSG ("SelfID packet 0x%x received", q0);
338 hpsb_selfid_received(host, cpu_to_be32(q0));
339 if (((q0 & 0x3f000000) >> 24) == phyid)
340 DBGMSG ("SelfID for this node is 0x%08x", q0);
343 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
349 DBGMSG("SelfID complete");
354 static void ohci_soft_reset(struct ti_ohci *ohci) {
357 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
359 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
360 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364 DBGMSG ("Soft reset finished");
368 /* Generate the dma receive prgs and start the context */
369 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
371 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
374 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
376 for (i=0; i<d->num_desc; i++) {
379 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
380 if (generate_irq)
381 c |= DMA_CTL_IRQ;
383 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
385 /* End of descriptor list? */
386 if (i + 1 < d->num_desc) {
387 d->prg_cpu[i]->branchAddress =
388 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
389 } else {
390 d->prg_cpu[i]->branchAddress =
391 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
392 }
394 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
395 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
401 if (d->type == DMA_CTX_ISO) {
402 /* Clear contextControl */
403 reg_write(ohci, d->ctrlClear, 0xffffffff);
405 /* Set bufferFill, isochHeader, multichannel for IR context */
406 reg_write(ohci, d->ctrlSet, 0xd0000000);
408 /* Set the context match register to match on all tags */
409 reg_write(ohci, d->ctxtMatch, 0xf0000000);
411 /* Clear the multi channel mask high and low registers */
412 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
413 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
415 /* Set up isoRecvIntMask to generate interrupts */
416 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
419 /* Tell the controller where the first AR program is */
420 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
423 reg_write(ohci, d->ctrlSet, 0x00008000);
425 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
428 /* Initialize the dma transmit context */
429 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
431 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
433 /* Stop the context */
434 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438 d->free_prgs = d->num_desc;
439 d->branchAddrPtr = NULL;
440 INIT_LIST_HEAD(&d->fifo_list);
441 INIT_LIST_HEAD(&d->pending_list);
443 if (d->type == DMA_CTX_ISO) {
444 /* enable interrupts */
445 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
448 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
451 /* Count the number of available iso contexts */
452 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
457 reg_write(ohci, reg, 0xffffffff);
458 tmp = reg_read(ohci, reg);
460 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
462 /* Count the number of contexts */
463 for (i=0; i<32; i++) {
464 if (tmp & 1) ctx++;
465 tmp >>= 1;
466 }
468 return ctx;
470 /* Global initialization */
471 static void ohci_initialize(struct ti_ohci *ohci)
477 spin_lock_init(&ohci->phy_reg_lock);
479 /* Put some defaults to these undefined bus options */
480 buf = reg_read(ohci, OHCI1394_BusOptions);
481 buf |= 0x60000000; /* Enable CMC and ISC */
482 if (hpsb_disable_irm)
483 buf &= ~0x80000000;
484 else
485 buf |= 0x80000000; /* Enable IRMC */
486 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
487 buf &= ~0x18000000; /* Disable PMC and BMC */
488 reg_write(ohci, OHCI1394_BusOptions, buf);
490 /* Set the bus number */
491 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
493 /* Enable posted writes */
494 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
496 /* Clear link control register */
497 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
499 /* Enable cycle timer and cycle master and set the IRM
500 * contender bit in our self ID packets if appropriate. */
501 reg_write(ohci, OHCI1394_LinkControlSet,
502 OHCI1394_LinkControl_CycleTimerEnable |
503 OHCI1394_LinkControl_CycleMaster);
504 i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
505 if (hpsb_disable_irm)
506 i &= ~PHY_04_CONTENDER;
508 i |= PHY_04_CONTENDER;
509 set_phy_reg(ohci, 4, i);
511 /* Set up self-id dma buffer */
512 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
514 /* enable self-id and phys */
515 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
516 OHCI1394_LinkControl_RcvPhyPkt);
518 /* Set the Config ROM mapping register */
519 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
521 /* Now get our max packet size */
522 ohci->max_packet_size =
523 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
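/* Bits 15:12 of BusOptions hold max_rec; the largest packet payload is
 * 2^(max_rec+1) bytes. For example, max_rec = 8 yields a 512-byte
 * max_packet_size. */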
525 /* Don't accept phy packets into AR request context */
526 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
528 /* Clear the interrupt mask */
529 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
530 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
532 /* Clear the interrupt mask */
533 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
534 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
536 /* Initialize AR dma */
537 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
538 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
540 /* Initialize AT dma */
541 initialize_dma_trm_ctx(&ohci->at_req_context);
542 initialize_dma_trm_ctx(&ohci->at_resp_context);
544 /* Initialize IR Legacy DMA channel mask */
545 ohci->ir_legacy_channels = 0;
548 * Accept AT requests from all nodes. This probably
549 * will have to be controlled from the subsystem
550 * on a per node basis.
552 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
554 /* Specify AT retries */
555 reg_write(ohci, OHCI1394_ATRetries,
556 OHCI1394_MAX_AT_REQ_RETRIES |
557 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
558 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
560 /* We don't want hardware swapping */
561 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
563 /* Enable interrupts */
564 reg_write(ohci, OHCI1394_IntMaskSet,
565 OHCI1394_unrecoverableError |
566 OHCI1394_masterIntEnable |
568 OHCI1394_selfIDComplete |
571 OHCI1394_respTxComplete |
572 OHCI1394_reqTxComplete |
575 OHCI1394_postedWriteErr |
576 OHCI1394_cycleInconsistent);
579 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
581 buf = reg_read(ohci, OHCI1394_Version);
583 sprintf (irq_buf, "%d", ohci->dev->irq);
585 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
587 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
588 "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
589 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
590 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
591 pci_resource_start(ohci->dev, 0),
592 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
593 ohci->max_packet_size,
594 ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
596 /* Check all of our ports to make sure that if anything is
597 * connected, we enable that port. */
598 num_ports = get_phy_reg(ohci, 2) & 0xf;
599 for (i = 0; i < num_ports; i++) {
602 set_phy_reg(ohci, 7, i);
603 status = get_phy_reg(ohci, 8);
605 if (status & 0x20)
606 set_phy_reg(ohci, 8, status & ~1);
609 /* Serial EEPROM Sanity check. */
610 if ((ohci->max_packet_size < 512) ||
611 (ohci->max_packet_size > 4096)) {
612 /* Serial EEPROM contents are suspect, set a sane max packet
613 * size and print the raw contents for bug reports if verbose
614 * debug is enabled. */
615 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
619 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
620 "attempting to setting max_packet_size to 512 bytes");
621 reg_write(ohci, OHCI1394_BusOptions,
622 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
623 ohci->max_packet_size = 512;
624 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
625 PRINT(KERN_DEBUG, " EEPROM Present: %d",
626 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
627 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
630 for (i = 0; ((i < 1000) &&
631 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
632 udelay(10);
634 for (i = 0; i < 0x20; i++) {
635 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
636 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
637 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
644 * Insert a packet in the DMA fifo and generate the DMA prg
645 * FIXME: rewrite the program in order to accept packets crossing
646 * page boundaries.
647 * check also that a single dma descriptor doesn't cross a
648 * page boundary.
649 */
650 static void insert_packet(struct ti_ohci *ohci,
651 struct dma_trm_ctx *d, struct hpsb_packet *packet)
654 int idx = d->prg_ind;
656 DBGMSG("Inserting packet for node " NODE_BUS_FMT
657 ", tlabel=%d, tcode=0x%x, speed=%d",
658 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
659 packet->tcode, packet->speed_code);
661 d->prg_cpu[idx]->begin.address = 0;
662 d->prg_cpu[idx]->begin.branchAddress = 0;
664 if (d->type == DMA_CTX_ASYNC_RESP) {
666 * For response packets, we need to put a timeout value in
667 * the 16 lower bits of the status... let's try 1 sec timeout
669 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
670 d->prg_cpu[idx]->begin.status = cpu_to_le32(
671 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
672 ((cycleTimer&0x01fff000)>>12));
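/* The timeStamp built above is: the 3-bit seconds field of the current
 * cycle time plus one (bits 15:13), and the current 13-bit cycle count
 * (bits 12:0) - i.e. the response expires roughly one second from now. */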
674 DBGMSG("cycleTimer: %08x timeStamp: %08x",
675 cycleTimer, d->prg_cpu[idx]->begin.status);
676 } else
677 d->prg_cpu[idx]->begin.status = 0;
679 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
681 if (packet->type == hpsb_raw) {
682 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
683 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
684 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
685 } else {
686 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
687 (packet->header[0] & 0xFFFF);
689 if (packet->tcode == TCODE_ISO_DATA) {
690 /* Sending an async stream packet */
691 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
692 } else {
693 /* Sending a normal async request or response */
694 d->prg_cpu[idx]->data[1] =
695 (packet->header[1] & 0xFFFF) |
696 (packet->header[0] & 0xFFFF0000);
697 d->prg_cpu[idx]->data[2] = packet->header[2];
698 d->prg_cpu[idx]->data[3] = packet->header[3];
700 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
703 if (packet->data_size) { /* block transmit */
704 if (packet->tcode == TCODE_STREAM_DATA){
705 d->prg_cpu[idx]->begin.control =
706 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
707 DMA_CTL_IMMEDIATE | 0x8);
709 d->prg_cpu[idx]->begin.control =
710 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
711 DMA_CTL_IMMEDIATE | 0x10);
713 d->prg_cpu[idx]->end.control =
714 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
715 DMA_CTL_IRQ |
716 DMA_CTL_BRANCH |
717 packet->data_size);
719 * Check that the packet data buffer
720 * does not cross a page boundary.
722 * XXX Fix this some day. eth1394 seems to trigger
723 * it, but ignoring it doesn't seem to cause a
727 if (cross_bound((unsigned long)packet->data,
728 packet->data_size)>0) {
729 /* FIXME: do something about it */
731 "%s: packet data addr: %p size %Zd bytes "
732 "cross page boundary", __FUNCTION__,
733 packet->data, packet->data_size);
736 d->prg_cpu[idx]->end.address = cpu_to_le32(
737 pci_map_single(ohci->dev, packet->data,
738 packet->data_size,
739 PCI_DMA_TODEVICE));
740 OHCI_DMA_ALLOC("single, block transmit packet");
742 d->prg_cpu[idx]->end.branchAddress = 0;
743 d->prg_cpu[idx]->end.status = 0;
744 if (d->branchAddrPtr)
745 *(d->branchAddrPtr) =
746 cpu_to_le32(d->prg_bus[idx] | 0x3);
747 d->branchAddrPtr =
748 &(d->prg_cpu[idx]->end.branchAddress);
749 } else { /* quadlet transmit */
750 if (packet->type == hpsb_raw)
751 d->prg_cpu[idx]->begin.control =
752 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
753 DMA_CTL_IMMEDIATE |
754 DMA_CTL_IRQ |
755 DMA_CTL_BRANCH |
756 (packet->header_size + 4));
757 else
758 d->prg_cpu[idx]->begin.control =
759 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
760 DMA_CTL_IMMEDIATE |
761 DMA_CTL_IRQ |
762 DMA_CTL_BRANCH |
763 packet->header_size);
765 if (d->branchAddrPtr)
766 *(d->branchAddrPtr) =
767 cpu_to_le32(d->prg_bus[idx] | 0x2);
768 d->branchAddrPtr =
769 &(d->prg_cpu[idx]->begin.branchAddress);
772 } else { /* iso packet */
773 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
774 (packet->header[0] & 0xFFFF);
775 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
776 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
778 d->prg_cpu[idx]->begin.control =
779 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
780 DMA_CTL_IMMEDIATE | 0x8);
781 d->prg_cpu[idx]->end.control =
782 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
783 DMA_CTL_UPDATE |
784 DMA_CTL_IRQ |
785 DMA_CTL_BRANCH |
786 packet->data_size);
787 d->prg_cpu[idx]->end.address = cpu_to_le32(
788 pci_map_single(ohci->dev, packet->data,
789 packet->data_size, PCI_DMA_TODEVICE));
790 OHCI_DMA_ALLOC("single, iso transmit packet");
792 d->prg_cpu[idx]->end.branchAddress = 0;
793 d->prg_cpu[idx]->end.status = 0;
794 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
795 " begin=%08x %08x %08x %08x\n"
796 " %08x %08x %08x %08x\n"
797 " end =%08x %08x %08x %08x",
798 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
799 d->prg_cpu[idx]->begin.control,
800 d->prg_cpu[idx]->begin.address,
801 d->prg_cpu[idx]->begin.branchAddress,
802 d->prg_cpu[idx]->begin.status,
803 d->prg_cpu[idx]->data[0],
804 d->prg_cpu[idx]->data[1],
805 d->prg_cpu[idx]->data[2],
806 d->prg_cpu[idx]->data[3],
807 d->prg_cpu[idx]->end.control,
808 d->prg_cpu[idx]->end.address,
809 d->prg_cpu[idx]->end.branchAddress,
810 d->prg_cpu[idx]->end.status);
811 if (d->branchAddrPtr)
812 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
813 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
815 d->free_prgs--;
817 /* queue the packet in the appropriate context queue */
818 list_add_tail(&packet->driver_list, &d->fifo_list);
819 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
823 * This function fills the FIFO with the (eventual) pending packets
824 * and runs or wakes up the DMA prg if necessary.
826 * The function MUST be called with the d->lock held.
828 static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
830 struct hpsb_packet *packet, *ptmp;
831 int idx = d->prg_ind;
832 int z = 0;
834 /* insert the packets into the dma fifo */
835 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
836 if (!d->free_prgs)
837 break;
839 /* For the first packet only */
840 if (!z)
841 z = (packet->data_size) ? 3 : 2;
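/* Z counts the 16-byte descriptor blocks at the branch target: the
 * immediate-header descriptor occupies two blocks, and a data payload
 * adds a third (the OUTPUT_LAST), hence 3 versus 2. */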
843 /* Insert the packet */
844 list_del_init(&packet->driver_list);
845 insert_packet(ohci, d, packet);
848 /* Nothing must have been done, either no free_prgs or no packets */
849 if (z == 0)
850 return;
852 /* Is the context running ? (should be unless it is
853 the first packet to be sent in this context) */
854 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
855 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
857 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
858 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
860 /* Check that the node id is valid, and not 63 */
861 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
862 PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
863 else
864 reg_write(ohci, d->ctrlSet, 0x8000);
865 } else {
866 /* Wake up the dma context if necessary */
867 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
868 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
870 /* do this always, to avoid race condition */
871 reg_write(ohci, d->ctrlSet, 0x1000);
877 /* Transmission of an async or iso packet */
878 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
880 struct ti_ohci *ohci = host->hostdata;
881 struct dma_trm_ctx *d;
884 if (packet->data_size > ohci->max_packet_size) {
886 "Transmit packet size %Zd is too big",
891 /* Decide whether we have an iso, a request, or a response packet */
892 if (packet->type == hpsb_raw)
893 d = &ohci->at_req_context;
894 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
895 /* The legacy IT DMA context is initialized on first
896 * use. However, the alloc cannot be run from
897 * interrupt context, so we bail out if that is the
898 * case. I don't see anyone sending ISO packets from
899 * interrupt context anyway... */
901 if (ohci->it_legacy_context.ohci == NULL) {
902 if (in_interrupt()) {
904 "legacy IT context cannot be initialized during interrupt");
908 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
909 DMA_CTX_ISO, 0, IT_NUM_DESC,
910 OHCI1394_IsoXmitContextBase) < 0) {
912 "error initializing legacy IT context");
916 initialize_dma_trm_ctx(&ohci->it_legacy_context);
919 d = &ohci->it_legacy_context;
920 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
921 d = &ohci->at_resp_context;
922 else
923 d = &ohci->at_req_context;
925 spin_lock_irqsave(&d->lock,flags);
927 list_add_tail(&packet->driver_list, &d->pending_list);
929 dma_trm_flush(ohci, d);
931 spin_unlock_irqrestore(&d->lock,flags);
936 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
938 struct ti_ohci *ohci = host->hostdata;
946 case SHORT_RESET:
947 phy_reg = get_phy_reg(ohci, 5);
948 phy_reg |= 0x40;
949 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
950 break;
951 case LONG_RESET:
952 phy_reg = get_phy_reg(ohci, 1);
953 phy_reg |= 0x40;
954 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
955 break;
956 case SHORT_RESET_NO_FORCE_ROOT:
957 phy_reg = get_phy_reg(ohci, 1);
958 if (phy_reg & 0x80) {
959 phy_reg &= ~0x80;
960 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
961 }
963 phy_reg = get_phy_reg(ohci, 5);
964 phy_reg |= 0x40;
965 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
966 break;
967 case LONG_RESET_NO_FORCE_ROOT:
968 phy_reg = get_phy_reg(ohci, 1);
969 phy_reg &= ~0x80;
970 phy_reg |= 0x40;
971 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
972 break;
973 case SHORT_RESET_FORCE_ROOT:
974 phy_reg = get_phy_reg(ohci, 1);
975 if (!(phy_reg & 0x80)) {
976 phy_reg |= 0x80;
977 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
978 }
980 phy_reg = get_phy_reg(ohci, 5);
981 phy_reg |= 0x40;
982 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
983 break;
984 case LONG_RESET_FORCE_ROOT:
985 phy_reg = get_phy_reg(ohci, 1);
986 phy_reg |= 0xc0;
987 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
988 break;
994 case GET_CYCLE_COUNTER:
995 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
996 break;
998 case SET_CYCLE_COUNTER:
999 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
1000 break;
1002 case SET_BUS_ID:
1003 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1004 break;
1006 case ACT_CYCLE_MASTER:
1008 /* check if we are root and other nodes are present */
1009 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1010 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1012 * enable cycleTimer, cycleMaster
1014 DBGMSG("Cycle master enabled");
1015 reg_write(ohci, OHCI1394_LinkControlSet,
1016 OHCI1394_LinkControl_CycleTimerEnable |
1017 OHCI1394_LinkControl_CycleMaster);
1019 } else {
1020 /* disable cycleTimer, cycleMaster, cycleSource */
1021 reg_write(ohci, OHCI1394_LinkControlClear,
1022 OHCI1394_LinkControl_CycleTimerEnable |
1023 OHCI1394_LinkControl_CycleMaster |
1024 OHCI1394_LinkControl_CycleSource);
1028 case CANCEL_REQUESTS:
1029 DBGMSG("Cancel request received");
1030 dma_trm_reset(&ohci->at_req_context);
1031 dma_trm_reset(&ohci->at_resp_context);
1034 case ISO_LISTEN_CHANNEL:
1037 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1038 int ir_legacy_active;
1040 if (arg<0 || arg>63) {
1042 "%s: IS0 listen channel %d is out of range",
1047 mask = (u64)0x1<<arg;
1049 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1051 if (ohci->ISO_channel_usage & mask) {
1053 "%s: IS0 listen channel %d is already used",
1055 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1059 ir_legacy_active = ohci->ir_legacy_channels;
1061 ohci->ISO_channel_usage |= mask;
1062 ohci->ir_legacy_channels |= mask;
1064 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1066 if (!ir_legacy_active) {
1067 if (ohci1394_register_iso_tasklet(ohci,
1068 &ohci->ir_legacy_tasklet) < 0) {
1069 PRINT(KERN_ERR, "No IR DMA context available");
1073 /* the IR context can be assigned to any DMA context
1074 * by ohci1394_register_iso_tasklet */
1075 d->ctx = ohci->ir_legacy_tasklet.context;
1076 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1077 32*d->ctx;
1078 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1079 32*d->ctx;
1080 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1081 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1083 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1085 if (printk_ratelimit())
1086 DBGMSG("IR legacy activated");
1089 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1091 if (arg > 31)
1092 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1093 (1U << (arg - 32)));
1094 else
1095 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1096 (1U << arg));
1098 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1099 DBGMSG("Listening enabled on channel %d", arg);
1102 case ISO_UNLISTEN_CHANNEL:
1106 if (arg<0 || arg>63) {
1108 "%s: IS0 unlisten channel %d is out of range",
1113 mask = (u64)0x1<<arg;
1115 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1117 if (!(ohci->ISO_channel_usage & mask)) {
1119 "%s: IS0 unlisten channel %d is not used",
1121 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1125 ohci->ISO_channel_usage &= ~mask;
1126 ohci->ir_legacy_channels &= ~mask;
1128 if (arg > 31)
1129 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1130 (1U << (arg - 32)));
1131 else
1132 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1133 (1U << arg));
1135 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1136 DBGMSG("Listening disabled on channel %d", arg);
1138 if (ohci->ir_legacy_channels == 0) {
1139 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1140 DBGMSG("ISO legacy receive context stopped");
1146 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1153 /***********************************
1154 * rawiso ISO reception *
1155 ***********************************/
1158 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1159 buffer is split into "blocks" (regions described by one DMA
1160 descriptor). Each block must be one page or less in size, and
1161 must not cross a page boundary.
1163 There is one little wrinkle with buffer-fill mode: a packet that
1164 starts in the final block may wrap around into the first block. But
1165 the user API expects all packets to be contiguous. Our solution is
1166 to keep the very last page of the DMA buffer in reserve - if a
1167 packet spans the gap, we copy its tail into this page.
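Illustrative example: with a 16-page DMA buffer, pages 0-14 form the
ring (nblocks = 15) and page 15 is the reserve. If a 1000-byte packet
begins 200 bytes before the end of page 14, its remaining 800 bytes
wrap to page 0, and the parser copies those 800 bytes into page 15 so
the reader sees one contiguous packet.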
1170 struct ohci_iso_recv {
1171 struct ti_ohci *ohci;
1173 struct ohci1394_iso_tasklet task;
1176 enum { BUFFER_FILL_MODE = 0,
1177 PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1179 /* memory and PCI mapping for the DMA descriptors */
1180 struct dma_prog_region prog;
1181 struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1183 /* how many DMA blocks fit in the buffer */
1184 unsigned int nblocks;
1186 /* stride of DMA blocks */
1187 unsigned int buf_stride;
1189 /* number of blocks to batch between interrupts */
1190 int block_irq_interval;
1192 /* block that DMA will finish next */
1195 /* (buffer-fill only) block that the reader will release next */
1198 /* (buffer-fill only) bytes of buffer the reader has released,
1199 less than one block */
1202 /* (buffer-fill only) buffer offset at which the next packet will appear */
1205 /* OHCI DMA context control registers */
1206 u32 ContextControlSet;
1207 u32 ContextControlClear;
1212 static void ohci_iso_recv_task(unsigned long data);
1213 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1214 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1215 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1216 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1218 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1220 struct ti_ohci *ohci = iso->host->hostdata;
1221 struct ohci_iso_recv *recv;
1225 recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1226 if (!recv)
1227 return -ENOMEM;
1229 iso->hostdata = recv;
1230 recv->ohci = ohci;
1231 recv->task_active = 0;
1232 dma_prog_region_init(&recv->prog);
1235 /* use buffer-fill mode, unless irq_interval is 1
1236 (note: multichannel requires buffer-fill) */
1238 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1239 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1240 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1242 recv->dma_mode = BUFFER_FILL_MODE;
1245 /* set nblocks, buf_stride, block_irq_interval */
1247 if (recv->dma_mode == BUFFER_FILL_MODE) {
1248 recv->buf_stride = PAGE_SIZE;
1250 /* one block per page of data in the DMA buffer, minus the final guard page */
1251 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1252 if (recv->nblocks < 3) {
1253 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1257 /* iso->irq_interval is in packets - translate that to blocks */
1258 if (iso->irq_interval == 1)
1259 recv->block_irq_interval = 1;
1261 recv->block_irq_interval = iso->irq_interval *
1262 ((recv->nblocks+1)/iso->buf_packets);
1263 if (recv->block_irq_interval*4 > recv->nblocks)
1264 recv->block_irq_interval = recv->nblocks/4;
1265 if (recv->block_irq_interval < 1)
1266 recv->block_irq_interval = 1;
1269 int max_packet_size;
1271 recv->nblocks = iso->buf_packets;
1272 recv->block_irq_interval = iso->irq_interval;
1273 if (recv->block_irq_interval * 4 > iso->buf_packets)
1274 recv->block_irq_interval = iso->buf_packets / 4;
1275 if (recv->block_irq_interval < 1)
1276 recv->block_irq_interval = 1;
1278 /* choose a buffer stride */
1279 /* must be a power of 2, and <= PAGE_SIZE */
1281 max_packet_size = iso->buf_size / iso->buf_packets;
1283 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1284 recv->buf_stride *= 2);
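/* e.g. an 8192-byte buffer holding 16 packets gives max_packet_size 512,
 * so buf_stride settles at 512 - a power of two and <= PAGE_SIZE. */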
1286 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1287 recv->buf_stride > PAGE_SIZE) {
1288 /* this shouldn't happen, but anyway... */
1289 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1294 recv->block_reader = 0;
1295 recv->released_bytes = 0;
1296 recv->block_dma = 0;
1297 recv->dma_offset = 0;
1299 /* size of DMA program = one descriptor per block */
1300 if (dma_prog_region_alloc(&recv->prog,
1301 sizeof(struct dma_cmd) * recv->nblocks,
1302 recv->ohci->dev))
1303 goto err;
1305 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1307 ohci1394_init_iso_tasklet(&recv->task,
1308 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1309 OHCI_ISO_RECEIVE,
1310 ohci_iso_recv_task, (unsigned long) iso);
1312 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1313 ret = -EBUSY;
1314 goto err;
1315 }
1317 recv->task_active = 1;
1319 /* recv context registers are spaced 32 bytes apart */
1320 ctx = recv->task.context;
1321 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1322 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1323 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1324 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1326 if (iso->channel == -1) {
1327 /* clear multi-channel selection mask */
1328 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1329 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1332 /* write the DMA program */
1333 ohci_iso_recv_program(iso);
1335 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1336 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1337 recv->dma_mode == BUFFER_FILL_MODE ?
1338 "buffer-fill" : "packet-per-buffer",
1339 iso->buf_size/PAGE_SIZE, iso->buf_size,
1340 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1345 ohci_iso_recv_shutdown(iso);
1349 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1351 struct ohci_iso_recv *recv = iso->hostdata;
1353 /* disable interrupts */
1354 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1357 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1360 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1362 struct ohci_iso_recv *recv = iso->hostdata;
1364 if (recv->task_active) {
1365 ohci_iso_recv_stop(iso);
1366 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1367 recv->task_active = 0;
1370 dma_prog_region_free(&recv->prog);
1372 iso->hostdata = NULL;
1375 /* set up a "gapped" ring buffer DMA program */
1376 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1378 struct ohci_iso_recv *recv = iso->hostdata;
1381 /* address of 'branch' field in previous DMA descriptor */
1382 u32 *prev_branch = NULL;
1384 for (blk = 0; blk < recv->nblocks; blk++) {
1387 /* the DMA descriptor */
1388 struct dma_cmd *cmd = &recv->block[blk];
1390 /* offset of the DMA descriptor relative to the DMA prog buffer */
1391 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1393 /* offset of this packet's data within the DMA buffer */
1394 unsigned long buf_offset = blk * recv->buf_stride;
1396 if (recv->dma_mode == BUFFER_FILL_MODE) {
1397 control = 2 << 28; /* INPUT_MORE */
1398 } else {
1399 control = 3 << 28; /* INPUT_LAST */
1400 }
1402 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1404 /* interrupt on last block, and at intervals */
1405 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1406 control |= 3 << 20; /* want interrupt */
1409 control |= 3 << 18; /* enable branch to address */
1410 control |= recv->buf_stride;
1412 cmd->control = cpu_to_le32(control);
1413 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1414 cmd->branchAddress = 0; /* filled in on next loop */
1415 cmd->status = cpu_to_le32(recv->buf_stride);
1417 /* link the previous descriptor to this one */
1418 if (prev_branch)
1419 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1422 prev_branch = &cmd->branchAddress;
1425 /* the final descriptor's branch address and Z should be left at 0 */
1428 /* listen or unlisten to a specific channel (multi-channel mode only) */
1429 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1431 struct ohci_iso_recv *recv = iso->hostdata;
1432 int reg, i;
1434 if (channel < 32) {
1435 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1436 i = channel;
1437 } else {
1438 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1439 i = channel - 32;
1440 }
1442 reg_write(recv->ohci, reg, (1 << i));
1444 /* issue a dummy read to force all PCI writes to be posted immediately */
1446 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1449 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1451 struct ohci_iso_recv *recv = iso->hostdata;
1454 for (i = 0; i < 64; i++) {
1455 if (mask & (1ULL << i)) {
1456 if (i < 32)
1457 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1458 else
1459 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1460 } else {
1461 if (i < 32)
1462 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1463 else
1464 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1465 }
1468 /* issue a dummy read to force all PCI writes to be posted immediately */
1470 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1473 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1475 struct ohci_iso_recv *recv = iso->hostdata;
1476 struct ti_ohci *ohci = recv->ohci;
1477 u32 command, contextMatch;
1479 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1482 /* always keep ISO headers */
1483 command = (1 << 30);
1485 if (recv->dma_mode == BUFFER_FILL_MODE)
1486 command |= (1 << 31);
1488 reg_write(recv->ohci, recv->ContextControlSet, command);
1490 /* match on specified tags */
1491 contextMatch = tag_mask << 28;
1493 if (iso->channel == -1) {
1494 /* enable multichannel reception */
1495 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1496 } else {
1497 /* listen on channel */
1498 contextMatch |= iso->channel;
1499 }
1501 if (cycle != -1) {
1502 u32 seconds;
1504 /* enable cycleMatch */
1505 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1507 /* set starting cycle */
1508 cycle &= 0x1FFF;
1510 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1511 just snarf them from the current time */
1512 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1514 /* advance one second to give some extra time for DMA to start */
1515 seconds += 1;
1517 cycle |= (seconds & 3) << 13;
1519 contextMatch |= cycle << 12;
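/* Worked example: if the cycle timer currently reads second 2 and the
 * caller asked for cycle 500, then after the one-second advance the
 * match value is ((3 & 3) << 13) | 500 - start at second 3, cycle 500. */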
1520 }
1522 if (sync != -1) {
1523 /* set sync flag on first DMA descriptor */
1524 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1525 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1527 /* match sync field */
1528 contextMatch |= (sync&0xf)<<8;
1529 }
1531 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1533 /* address of first descriptor block */
1534 command = dma_prog_region_offset_to_bus(&recv->prog,
1535 recv->block_dma * sizeof(struct dma_cmd));
1536 command |= 1; /* Z=1 */
1538 reg_write(recv->ohci, recv->CommandPtr, command);
1540 /* enable interrupts */
1541 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1546 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1548 /* issue a dummy read of the cycle timer register to force
1549 all PCI writes to be posted immediately */
1551 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1554 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1556 "Error starting IR DMA (ContextControl 0x%08x)\n",
1557 reg_read(recv->ohci, recv->ContextControlSet));
1564 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1566 /* re-use the DMA descriptor for the block */
1567 /* by linking the previous descriptor to it */
1569 int next_i = block;
1570 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1572 struct dma_cmd *next = &recv->block[next_i];
1573 struct dma_cmd *prev = &recv->block[prev_i];
1575 /* ignore out-of-range requests */
1576 if ((block < 0) || (block >= recv->nblocks))
1577 return;
1579 /* 'next' becomes the new end of the DMA chain,
1580 so disable branch and enable interrupt */
1581 next->branchAddress = 0;
1582 next->control |= cpu_to_le32(3 << 20);
1583 next->status = cpu_to_le32(recv->buf_stride);
1585 /* link prev to next */
1586 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1587 sizeof(struct dma_cmd) * next_i)
1590 /* disable interrupt on previous DMA descriptor, except at intervals */
1591 if ((prev_i % recv->block_irq_interval) == 0) {
1592 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1594 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1598 /* wake up DMA in case it fell asleep */
1599 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1602 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1603 struct hpsb_iso_packet_info *info)
1605 /* release the memory where the packet was */
1606 recv->released_bytes += info->total_len;
1608 /* have we released enough memory for one block? */
1609 while (recv->released_bytes > recv->buf_stride) {
1610 ohci_iso_recv_release_block(recv, recv->block_reader);
1611 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1612 recv->released_bytes -= recv->buf_stride;
1616 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1618 struct ohci_iso_recv *recv = iso->hostdata;
1619 if (recv->dma_mode == BUFFER_FILL_MODE) {
1620 ohci_iso_recv_bufferfill_release(recv, info);
1622 ohci_iso_recv_release_block(recv, info - iso->infos);
1626 /* parse all packets from blocks that have been fully received */
1627 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1631 struct ti_ohci *ohci = recv->ohci;
1634 /* we expect the next parsable packet to begin at recv->dma_offset */
1635 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1637 unsigned int offset;
1638 unsigned short len, cycle, total_len;
1639 unsigned char channel, tag, sy;
1641 unsigned char *p = iso->data_buf.kvirt;
1643 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1645 /* don't loop indefinitely */
1646 if (runaway++ > 100000) {
1647 atomic_inc(&iso->overflows);
1649 "IR DMA error - Runaway during buffer parsing!\n");
1653 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1654 if (this_block == recv->block_dma)
1655 break;
1659 /* parse data length, tag, channel, and sy */
1661 /* note: we keep our own local copies of 'len' and 'offset'
1662 so the user can't mess with them by poking in the mmap area */
1664 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1668 "IR DMA error - bogus 'len' value %u\n", len);
1671 channel = p[recv->dma_offset+1] & 0x3F;
1672 tag = p[recv->dma_offset+1] >> 6;
1673 sy = p[recv->dma_offset+0] & 0xF;
1675 /* advance to data payload */
1676 recv->dma_offset += 4;
1678 /* check for wrap-around */
1679 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1680 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1683 /* dma_offset now points to the first byte of the data payload */
1684 offset = recv->dma_offset;
1686 /* advance to xferStatus/timeStamp */
1687 recv->dma_offset += len;
1689 total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1690 /* payload is padded to 4 bytes */
1691 if (len % 4) {
1692 recv->dma_offset += 4 - (len%4);
1693 total_len += 4 - (len%4);
1694 }
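/* e.g. a 13-byte payload is padded to 16 bytes, so total_len becomes
 * 13 + 8 + 3 = 24 while 'len' stays 13. */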
1696 /* check for wrap-around */
1697 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1698 /* uh oh, the packet data wraps from the last
1699 to the first DMA block - make the packet
1700 contiguous by copying its "tail" into the
1703 int guard_off = recv->buf_stride*recv->nblocks;
1704 int tail_len = len - (guard_off - offset);
1706 if (tail_len > 0 && tail_len < recv->buf_stride) {
1707 memcpy(iso->data_buf.kvirt + guard_off,
1708 iso->data_buf.kvirt,
1709 tail_len);
1710 }
1712 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1715 /* parse timestamp */
1716 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1717 cycle &= 0x1FFF;
1719 /* advance to next packet */
1720 recv->dma_offset += 4;
1722 /* check for wrap-around */
1723 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1724 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1727 hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
1734 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1737 struct ti_ohci *ohci = recv->ohci;
1739 /* loop over all blocks */
1740 for (loop = 0; loop < recv->nblocks; loop++) {
1742 /* check block_dma to see if it's done */
1743 struct dma_cmd *im = &recv->block[recv->block_dma];
1745 /* check the DMA descriptor for new writes to xferStatus */
1746 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1748 /* rescount is the number of bytes *remaining to be written* in the block */
1749 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1751 unsigned char event = xferstatus & 0x1F;
1753 if (!event) {
1754 /* nothing has happened to this block yet */
1755 break;
1756 }
1758 if (event != 0x11) {
1759 atomic_inc(&iso->overflows);
1761 "IR DMA error - OHCI error code 0x%02x\n", event);
1764 if (rescount != 0) {
1765 /* the card is still writing to this block;
1766 we can't touch it until it's done */
1767 break;
1768 }
1770 /* OK, the block is finished... */
1772 /* sync our view of the block */
1773 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1775 /* reset the DMA descriptor */
1776 im->status = cpu_to_le32(recv->buf_stride);
1778 /* advance block_dma */
1779 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1781 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1782 atomic_inc(&iso->overflows);
1783 DBGMSG("ISO reception overflow - "
1784 "ran out of DMA blocks");
1788 /* parse any packets that have arrived */
1789 ohci_iso_recv_bufferfill_parse(iso, recv);
1792 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1796 struct ti_ohci *ohci = recv->ohci;
1798 /* loop over the entire buffer */
1799 for (count = 0; count < recv->nblocks; count++) {
1802 /* pointer to the DMA descriptor */
1803 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1805 /* check the DMA descriptor for new writes to xferStatus */
1806 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1807 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1809 unsigned char event = xferstatus & 0x1F;
1811 if (!event) {
1812 /* this packet hasn't come in yet; we are done for now */
1813 break;
1814 }
1816 if (event == 0x11) {
1817 /* packet received successfully! */
1819 /* rescount is the number of bytes *remaining* in the packet buffer,
1820 after the packet was written */
1821 packet_len = recv->buf_stride - rescount;
1823 } else if (event == 0x02) {
1824 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1826 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1829 /* sync our view of the buffer */
1830 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1832 /* record the per-packet info */
1834 /* iso header is 8 bytes ahead of the data payload */
1837 unsigned int offset;
1838 unsigned short cycle;
1839 unsigned char channel, tag, sy;
1841 offset = iso->pkt_dma * recv->buf_stride;
1842 hdr = iso->data_buf.kvirt + offset;
1844 /* skip iso header */
1845 offset += 8;
1846 packet_len -= 8;
1848 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1849 channel = hdr[5] & 0x3F;
1850 tag = hdr[5] >> 6;
1851 sy = hdr[4] & 0xF;
1853 hpsb_iso_packet_received(iso, offset, packet_len,
1854 recv->buf_stride, cycle, channel, tag, sy);
1857 /* reset the DMA descriptor */
1858 il->status = cpu_to_le32(recv->buf_stride);
1861 recv->block_dma = iso->pkt_dma;
1869 static void ohci_iso_recv_task(unsigned long data)
1871 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1872 struct ohci_iso_recv *recv = iso->hostdata;
1874 if (recv->dma_mode == BUFFER_FILL_MODE)
1875 ohci_iso_recv_bufferfill_task(iso, recv);
1877 ohci_iso_recv_packetperbuf_task(iso, recv);
1880 /***********************************
1881 * rawiso ISO transmission *
1882 ***********************************/
1884 struct ohci_iso_xmit {
1885 struct ti_ohci *ohci;
1886 struct dma_prog_region prog;
1887 struct ohci1394_iso_tasklet task;
1890 u32 ContextControlSet;
1891 u32 ContextControlClear;
1895 /* transmission DMA program:
1896 one OUTPUT_MORE_IMMEDIATE for the IT header
1897 one OUTPUT_LAST for the buffer data */
1899 struct iso_xmit_cmd {
1900 struct dma_cmd output_more_immediate;
1901 u8 iso_hdr[8];
1902 u32 unused[2];
1903 struct dma_cmd output_last;
1904 };
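/* Each iso_xmit_cmd is 48 bytes: a 16-byte OUTPUT_MORE_IMMEDIATE
 * descriptor, 16 bytes of immediate data carrying the 8-byte IT header,
 * and a 16-byte OUTPUT_LAST - three descriptor blocks, matching the Z=3
 * used when branching to it. */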
1906 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1907 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1908 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1909 static void ohci_iso_xmit_task(unsigned long data);
1911 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1913 struct ohci_iso_xmit *xmit;
1914 unsigned int prog_size;
1918 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1919 if (!xmit)
1920 return -ENOMEM;
1922 iso->hostdata = xmit;
1923 xmit->ohci = iso->host->hostdata;
1924 xmit->task_active = 0;
1926 dma_prog_region_init(&xmit->prog);
1928 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1930 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1931 goto err;
1933 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1934 ohci_iso_xmit_task, (unsigned long) iso);
1936 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1937 ret = -EBUSY;
1938 goto err;
1939 }
1941 xmit->task_active = 1;
1943 /* xmit context registers are spaced 16 bytes apart */
1944 ctx = xmit->task.context;
1945 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1946 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1947 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1952 ohci_iso_xmit_shutdown(iso);
1956 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1958 struct ohci_iso_xmit *xmit = iso->hostdata;
1959 struct ti_ohci *ohci = xmit->ohci;
1961 /* disable interrupts */
1962 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1965 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1966 /* XXX the DMA context will lock up if you try to send too much data! */
1968 "you probably exceeded the OHCI card's bandwidth limit - "
1969 "reload the module and reduce xmit bandwidth");
1973 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1975 struct ohci_iso_xmit *xmit = iso->hostdata;
1977 if (xmit->task_active) {
1978 ohci_iso_xmit_stop(iso);
1979 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1980 xmit->task_active = 0;
1983 dma_prog_region_free(&xmit->prog);
1985 iso->hostdata = NULL;
1988 static void ohci_iso_xmit_task(unsigned long data)
1990 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1991 struct ohci_iso_xmit *xmit = iso->hostdata;
1992 struct ti_ohci *ohci = xmit->ohci;
1996 /* check the whole buffer if necessary, starting at pkt_dma */
1997 for (count = 0; count < iso->buf_packets; count++) {
2000 /* DMA descriptor */
2001 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2003 /* check for new writes to xferStatus */
2004 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2005 u8 event = xferstatus & 0x1F;
2007 if (!event) {
2008 /* packet hasn't been sent yet; we are done for now */
2009 break;
2010 }
2012 if (event != 0x11)
2013 PRINT(KERN_ERR,
2014 "IT DMA error - OHCI error code 0x%02x\n", event);
2016 /* at least one packet went out, so wake up the writer */
2017 wake = 1;
2020 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2022 /* tell the subsystem the packet has gone out */
2023 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2025 /* reset the DMA descriptor for next time */
2026 cmd->output_last.status = 0;
2033 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2035 struct ohci_iso_xmit *xmit = iso->hostdata;
2036 struct ti_ohci *ohci = xmit->ohci;
2039 struct iso_xmit_cmd *next, *prev;
2041 unsigned int offset;
2043 unsigned char tag, sy;
2045 /* check that the packet doesn't cross a page boundary
2046 (we could allow this if we added OUTPUT_MORE descriptor support) */
2047 if (cross_bound(info->offset, info->len)) {
2049 "rawiso xmit: packet %u crosses a page boundary",
2054 offset = info->offset;
2059 /* sync up the card's view of the buffer */
2060 dma_region_sync_for_device(&iso->data_buf, offset, len);
2062 /* append first_packet to the DMA chain */
2063 /* by linking the previous descriptor to it */
2064 /* (next will become the new end of the DMA chain) */
2066 next_i = iso->first_packet;
2067 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2069 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2070 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2072 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2073 memset(next, 0, sizeof(struct iso_xmit_cmd));
2074 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2076 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2078 /* tcode = 0xA, and sy */
2079 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2081 /* tag and channel number */
2082 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2084 /* transmission speed */
2085 next->iso_hdr[2] = iso->speed & 0x7;
2087 /* payload size */
2088 next->iso_hdr[6] = len & 0xFF;
2089 next->iso_hdr[7] = len >> 8;
2091 /* set up the OUTPUT_LAST */
2092 next->output_last.control = cpu_to_le32(1 << 28);
2093 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2094 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2095 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2096 next->output_last.control |= cpu_to_le32(len);
2098 /* payload bus address */
2099 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2101 /* leave branchAddress at zero for now */
2103 /* re-write the previous DMA descriptor to chain to this one */
2105 /* set prev branch address to point to next (Z=3) */
2106 prev->output_last.branchAddress = cpu_to_le32(
2107 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2109 /* disable interrupt, unless required by the IRQ interval */
2110 if (prev_i % iso->irq_interval) {
2111 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2113 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2118 /* wake DMA in case it is sleeping */
2119 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2121 /* issue a dummy read of the cycle timer to force all PCI
2122 writes to be posted immediately */
2124 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2129 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2131 struct ohci_iso_xmit *xmit = iso->hostdata;
2132 struct ti_ohci *ohci = xmit->ohci;
2134 /* clear out the control register */
2135 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2138 /* address and length of first descriptor block (Z=3) */
2139 reg_write(xmit->ohci, xmit->CommandPtr,
2140 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2142 /* cycle match */
2143 if (cycle != -1) {
2144 u32 start = cycle & 0x1FFF;
2146 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2147 just snarf them from the current time */
2148 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2150 /* advance one second to give some extra time for DMA to start */
2151 seconds += 1;
2153 start |= (seconds & 3) << 13;
2155 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2156 }
2158 /* enable interrupts */
2159 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2162 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2165 /* wait 100 usec to give the card time to go active */
2166 udelay(100);
2168 /* check the RUN bit */
2169 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2170 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2171 reg_read(xmit->ohci, xmit->ContextControlSet));
2172 return -1;
2173 }
2175 return 0;
2176 }
2178 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2179 {
2181 switch(cmd) {
2182 case XMIT_INIT:
2183 return ohci_iso_xmit_init(iso);
2184 case XMIT_START:
2185 return ohci_iso_xmit_start(iso, arg);
2186 case XMIT_STOP:
2187 ohci_iso_xmit_stop(iso);
2188 return 0;
2189 case XMIT_QUEUE:
2190 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2191 case XMIT_SHUTDOWN:
2192 ohci_iso_xmit_shutdown(iso);
2193 return 0;
2195 case RECV_INIT:
2196 return ohci_iso_recv_init(iso);
2197 case RECV_START: {
2198 int *args = (int*) arg;
2199 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2200 }
2201 case RECV_STOP:
2202 ohci_iso_recv_stop(iso);
2203 return 0;
2204 case RECV_RELEASE:
2205 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2206 return 0;
2207 case RECV_FLUSH:
2208 ohci_iso_recv_task((unsigned long) iso);
2209 return 0;
2210 case RECV_SHUTDOWN:
2211 ohci_iso_recv_shutdown(iso);
2212 return 0;
2213 case RECV_LISTEN_CHANNEL:
2214 ohci_iso_recv_change_channel(iso, arg, 1);
2215 return 0;
2216 case RECV_UNLISTEN_CHANNEL:
2217 ohci_iso_recv_change_channel(iso, arg, 0);
2218 return 0;
2219 case RECV_SET_CHANNEL_MASK:
2220 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2221 return 0;
2223 default:
2224 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2225 cmd);
2226 break;
2227 }
2228 return -EINVAL;
2229 }
2231 /***************************************
2232 * IEEE-1394 functionality section END *
2233 ***************************************/
2236 /********************************************************
2237 * Global stuff (interrupt handler, init/shutdown code) *
2238 ********************************************************/
2240 static void dma_trm_reset(struct dma_trm_ctx *d)
2241 {
2242 unsigned long flags;
2243 LIST_HEAD(packet_list);
2244 struct ti_ohci *ohci = d->ohci;
2245 struct hpsb_packet *packet, *ptmp;
2247 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2249 /* Lock the context, reset it and release it. Move the packets
2250 * that were pending in the context to packet_list and free
2251 * them after releasing the lock. */
2253 spin_lock_irqsave(&d->lock, flags);
2255 list_splice(&d->fifo_list, &packet_list);
2256 list_splice(&d->pending_list, &packet_list);
2257 INIT_LIST_HEAD(&d->fifo_list);
2258 INIT_LIST_HEAD(&d->pending_list);
2260 d->branchAddrPtr = NULL;
2261 d->sent_ind = d->prg_ind;
2262 d->free_prgs = d->num_desc;
2264 spin_unlock_irqrestore(&d->lock, flags);
2266 if (list_empty(&packet_list))
2267 return;
2269 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2271 /* Now process subsystem callbacks for the packets from this
2272 * context. */
2273 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2274 list_del_init(&packet->driver_list);
2275 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2276 }
2277 }
2279 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2280 quadlet_t rx_event,
2281 quadlet_t tx_event)
2282 {
2283 struct ohci1394_iso_tasklet *t;
2284 unsigned long mask;
2285 unsigned long flags;
2287 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2289 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2290 mask = 1 << t->context;
2292 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2293 tasklet_schedule(&t->tasklet);
2294 else if (rx_event & mask)
2295 tasklet_schedule(&t->tasklet);
2296 }
2298 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2299 }
2301 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2302 struct pt_regs *regs_are_unused)
2303 {
2304 quadlet_t event, node_id;
2305 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2306 struct hpsb_host *host = ohci->host;
2307 int phyid = -1, isroot = 0;
2308 unsigned long flags;
2310 /* Read and clear the interrupt event register. Don't clear
2311 * the busReset event, though. This is done when we get the
2312 * selfIDComplete interrupt. */
2313 spin_lock_irqsave(&ohci->event_lock, flags);
2314 event = reg_read(ohci, OHCI1394_IntEventClear);
2315 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2316 spin_unlock_irqrestore(&ohci->event_lock, flags);
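/* Annotation: per OHCI, IntEventClear reads back the same interrupt
 * event register as IntEventSet, and writing a 1 to a bit position
 * clears that event. The read-then-write sequence above therefore
 * snapshots and acknowledges everything except busReset in one go. */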
2318 if (!event)
2319 return IRQ_NONE;
2321 /* If event is ~(u32)0, the CardBus card was ejected. In this case
2322 * we just return, and clean up in the ohci1394_pci_remove
2323 * function. */
2324 if (event == ~(u32) 0) {
2325 DBGMSG("Device removed.");
2326 return IRQ_NONE;
2327 }
2329 DBGMSG("IntEvent: %08x", event);
2331 if (event & OHCI1394_unrecoverableError) {
2332 int ctx;
2333 PRINT(KERN_ERR, "Unrecoverable error!");
2335 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2336 PRINT(KERN_ERR, "Async Req Tx Context died: "
2337 "ctrl[%08x] cmdptr[%08x]",
2338 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2339 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2341 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2342 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2343 "ctrl[%08x] cmdptr[%08x]",
2344 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2345 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2347 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2348 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2349 "ctrl[%08x] cmdptr[%08x]",
2350 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2351 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2353 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2354 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2355 "ctrl[%08x] cmdptr[%08x]",
2356 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2357 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2359 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2360 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2361 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2362 "ctrl[%08x] cmdptr[%08x]", ctx,
2363 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2364 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2365 }
2367 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2368 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2369 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2370 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2371 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2372 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2373 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2374 }
2376 event &= ~OHCI1394_unrecoverableError;
2377 }
2378 if (event & OHCI1394_postedWriteErr) {
2379 PRINT(KERN_ERR, "physical posted write error");
2380 /* no recovery strategy yet; would have to involve protocol drivers */
2381 }
2382 if (event & OHCI1394_cycleInconsistent) {
2383 /* We subscribe to the cycleInconsistent event only to
2384 * clear the corresponding event bit... otherwise,
2385 * isochronous cycleMatch DMA won't work. */
2386 DBGMSG("OHCI1394_cycleInconsistent");
2387 event &= ~OHCI1394_cycleInconsistent;
2388 }
2389 if (event & OHCI1394_busReset) {
2390 /* The busReset event bit can't be cleared during the
2391 * selfID phase, so we disable busReset interrupts, to
2392 * avoid burying the cpu in interrupt requests. */
2393 spin_lock_irqsave(&ohci->event_lock, flags);
2394 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2396 if (ohci->check_busreset) {
2397 int loop_count = 0;
2399 udelay(10);
2401 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2402 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2404 spin_unlock_irqrestore(&ohci->event_lock, flags);
2405 udelay(10);
2406 spin_lock_irqsave(&ohci->event_lock, flags);
2408 /* The loop counter check is to prevent the driver
2409 * from remaining in this state forever. On the initial
2410 * bus reset the loop can otherwise run forever and the
2411 * system hangs, until some device is manually plugged
2412 * into or out of a port. The forced reset seems to solve
2413 * this; it mainly affects nForce2 boards. */
2414 if (loop_count > 10000) {
2415 ohci_devctl(host, RESET_BUS, LONG_RESET);
2416 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2417 loop_count = 0;
2418 }
2420 loop_count++;
2421 }
2422 }
2423 spin_unlock_irqrestore(&ohci->event_lock, flags);
2424 if (!host->in_bus_reset) {
2425 DBGMSG("irq_handler: Bus reset requested");
2427 /* Subsystem call */
2428 hpsb_bus_reset(ohci->host);
2429 }
2430 event &= ~OHCI1394_busReset;
2431 }
2432 if (event & OHCI1394_reqTxComplete) {
2433 struct dma_trm_ctx *d = &ohci->at_req_context;
2434 DBGMSG("Got reqTxComplete interrupt "
2435 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2436 if (reg_read(ohci, d->ctrlSet) & 0x800)
2437 ohci1394_stop_context(ohci, d->ctrlClear,
2438 "reqTxComplete");
2440 dma_trm_tasklet((unsigned long)d);
2441 //tasklet_schedule(&d->task);
2442 event &= ~OHCI1394_reqTxComplete;
2443 }
2444 if (event & OHCI1394_respTxComplete) {
2445 struct dma_trm_ctx *d = &ohci->at_resp_context;
2446 DBGMSG("Got respTxComplete interrupt "
2447 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2448 if (reg_read(ohci, d->ctrlSet) & 0x800)
2449 ohci1394_stop_context(ohci, d->ctrlClear,
2450 "respTxComplete");
2452 tasklet_schedule(&d->task);
2453 event &= ~OHCI1394_respTxComplete;
2454 }
2455 if (event & OHCI1394_RQPkt) {
2456 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2457 DBGMSG("Got RQPkt interrupt status=0x%08X",
2458 reg_read(ohci, d->ctrlSet));
2459 if (reg_read(ohci, d->ctrlSet) & 0x800)
2460 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2462 tasklet_schedule(&d->task);
2463 event &= ~OHCI1394_RQPkt;
2464 }
2465 if (event & OHCI1394_RSPkt) {
2466 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2467 DBGMSG("Got RSPkt interrupt status=0x%08X",
2468 reg_read(ohci, d->ctrlSet));
2469 if (reg_read(ohci, d->ctrlSet) & 0x800)
2470 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2472 tasklet_schedule(&d->task);
2473 event &= ~OHCI1394_RSPkt;
2474 }
2475 if (event & OHCI1394_isochRx) {
2476 quadlet_t rx_event;
2478 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2479 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2480 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2481 event &= ~OHCI1394_isochRx;
2482 }
2483 if (event & OHCI1394_isochTx) {
2484 quadlet_t tx_event;
2486 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2487 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2488 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2489 event &= ~OHCI1394_isochTx;
2490 }
2491 if (event & OHCI1394_selfIDComplete) {
2492 if (host->in_bus_reset) {
2493 node_id = reg_read(ohci, OHCI1394_NodeID);
2495 if (!(node_id & 0x80000000)) {
2496 PRINT(KERN_ERR,
2497 "SelfID received, but NodeID invalid "
2498 "(probably new bus reset occurred): %08X",
2499 node_id);
2500 goto selfid_not_valid;
2501 }
2503 phyid = node_id & 0x0000003f;
2504 isroot = (node_id & 0x40000000) != 0;
2506 DBGMSG("SelfID interrupt received "
2507 "(phyid %d, %s)", phyid,
2508 (isroot ? "root" : "not root"));
2510 handle_selfid(ohci, host, phyid, isroot);
2512 /* Clear the bus reset event and re-enable the
2513 * busReset interrupt. */
2514 spin_lock_irqsave(&ohci->event_lock, flags);
2515 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2516 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2517 spin_unlock_irqrestore(&ohci->event_lock, flags);
2519 /* Accept Physical requests from all nodes. */
2520 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2521 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2523 /* Turn on phys dma reception.
2524 *
2525 * TODO: Enable some sort of filtering management.
2526 */
2527 if (phys_dma) {
2528 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2529 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2530 reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
2531 } else {
2532 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0x00000000);
2533 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0x00000000);
2534 }
2536 DBGMSG("PhyReqFilter=%08x%08x",
2537 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2538 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2540 hpsb_selfid_complete(host, phyid, isroot);
2541 } else
2542 PRINT(KERN_ERR,
2543 "SelfID received outside of bus reset sequence");
2545 selfid_not_valid:
2546 event &= ~OHCI1394_selfIDComplete;
2547 }
2549 /* Make sure we handle everything, just in case we accidentally
2550 * enabled an interrupt that we didn't write a handler for. */
2551 if (event)
2552 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2553 event);
2555 return IRQ_HANDLED;
2556 }
2558 /* Put the buffer back into the dma context */
2559 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2560 {
2561 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2562 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2564 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2565 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2566 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2567 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
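/* Annotation: the low nibble of branchAddress is the Z field, the
 * number of descriptors at the branch target. Masking it to 0 above
 * marks the refilled descriptor as end-of-list; setting Z=1 on the
 * previous descriptor re-links it so the context can branch into the
 * freshly recycled buffer. (cpu_to_le32 on these constants is a
 * byte-swap on big-endian hosts and a no-op on little-endian ones.) */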
2569 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2570 * context program descriptors before it sees the wakeup bit set. */
2571 wmb();
2573 /* wake up the dma context if necessary */
2574 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2575 PRINT(KERN_INFO,
2576 "Waking dma ctx=%d ... processing is probably too slow",
2577 d->ctx);
2578 }
2580 /* do this always, to avoid race condition */
2581 reg_write(ohci, d->ctrlSet, 0x1000);
2582 }
2584 #define cond_le32_to_cpu(data, noswap) \
2585 (noswap ? data : le32_to_cpu(data))
2587 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2588 -1, 0, -1, 0, -1, -1, 16, -1};
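/* Annotation (not in the original source): TCODE_SIZE maps a 1394
 * transaction code to the size of its header plus trailer quadlet in
 * the AR buffer: 16 or 20 bytes for fixed-size packets, 0 for block
 * packets whose total length must instead be derived from the
 * data_length field (see packet_length() below), and -1 for tcodes
 * that are invalid in an async receive context. */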
2590 /*
2591 * Determine the length of a packet in the buffer
2592 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2593 */
2594 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2595 int offset, unsigned char tcode, int noswap)
2596 {
2597 int length = -1;
2599 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2600 length = TCODE_SIZE[tcode];
2601 if (length == 0) {
2602 if (offset + 12 >= d->buf_size) {
2603 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2604 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2605 } else {
2606 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2607 }
2608 length += 20;
2609 }
2610 } else if (d->type == DMA_CTX_ISO) {
2611 /* Assumption: buffer fill mode with header/trailer */
2612 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2613 }
2615 if (length > 0 && length % 4)
2616 length += 4 - (length % 4);
2618 return length;
2619 }
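/* Worked example (annotation): for a block packet (a tcode with
 * TCODE_SIZE == 0) carrying data_length = 6, quadlet 3 of the header
 * yields length = 6; adding the 20 bytes of header and trailer gives
 * 26, and the final rounding pads it to the next quadlet boundary, 28. */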
2621 /* Tasklet that processes dma receive buffers */
2622 static void dma_rcv_tasklet (unsigned long data)
2623 {
2624 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2625 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2626 unsigned int split_left, idx, offset, rescount;
2627 unsigned char tcode;
2628 int length, bytes_left, ack;
2629 unsigned long flags;
2630 quadlet_t *buf_ptr;
2631 char *split_ptr;
2632 char msg[256];
2634 spin_lock_irqsave(&d->lock, flags);
2636 idx = d->buf_ind;
2637 offset = d->buf_offset;
2638 buf_ptr = d->buf_cpu[idx] + offset/4;
2640 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2641 bytes_left = d->buf_size - rescount - offset;
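/* Annotation: the descriptor's status field holds resCount, the number
 * of buffer bytes the controller has NOT yet filled. With a 4096-byte
 * buffer, resCount = 4000 and offset = 0, there are 96 bytes of new
 * packet data to parse in the loop below. */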
2643 while (bytes_left > 0) {
2644 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2646 /* packet_length() will return < 4 for an error */
2647 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2649 if (length < 4) { /* something is wrong */
2650 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2651 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2652 d->ctx, length);
2653 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2654 spin_unlock_irqrestore(&d->lock, flags);
2655 return;
2656 }
2658 /* The first case is where we have a packet that crosses
2659 * over more than one descriptor. The next case is where
2660 * it's all in the first descriptor. */
2661 if ((offset + length) > d->buf_size) {
2662 DBGMSG("Split packet rcv'd");
2663 if (length > d->split_buf_size) {
2664 ohci1394_stop_context(ohci, d->ctrlClear,
2665 "Split packet size exceeded");
2666 d->buf_ind = idx;
2667 d->buf_offset = offset;
2668 spin_unlock_irqrestore(&d->lock, flags);
2669 return;
2670 }
2672 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2673 == d->buf_size) {
2674 /* The other part of the packet has not been written yet.
2675 * This should never happen; in any case, we will pick
2676 * it up on the next call. */
2677 PRINT(KERN_INFO,
2678 "Got only half a packet!");
2679 d->buf_ind = idx;
2680 d->buf_offset = offset;
2681 spin_unlock_irqrestore(&d->lock, flags);
2682 return;
2683 }
2685 split_left = length;
2686 split_ptr = (char *)d->spb;
2687 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2688 split_left -= d->buf_size-offset;
2689 split_ptr += d->buf_size-offset;
2690 insert_dma_buffer(d, idx);
2691 idx = (idx+1) % d->num_desc;
2692 buf_ptr = d->buf_cpu[idx];
2693 offset = 0;
2695 while (split_left >= d->buf_size) {
2696 memcpy(split_ptr,buf_ptr,d->buf_size);
2697 split_ptr += d->buf_size;
2698 split_left -= d->buf_size;
2699 insert_dma_buffer(d, idx);
2700 idx = (idx+1) % d->num_desc;
2701 buf_ptr = d->buf_cpu[idx];
2702 }
2704 if (split_left > 0) {
2705 memcpy(split_ptr, buf_ptr, split_left);
2706 offset = split_left;
2707 buf_ptr += offset/4;
2708 }
2709 } else {
2710 DBGMSG("Single packet rcv'd");
2711 memcpy(d->spb, buf_ptr, length);
2712 offset += length;
2713 buf_ptr += length/4;
2714 if (offset==d->buf_size) {
2715 insert_dma_buffer(d, idx);
2716 idx = (idx+1) % d->num_desc;
2717 buf_ptr = d->buf_cpu[idx];
2718 offset = 0;
2719 }
2720 }
2722 /* We get one phy packet to the async descriptor for each
2723 * bus reset. We always ignore it. */
2724 if (tcode != OHCI1394_TCODE_PHY) {
2725 if (!ohci->no_swap_incoming)
2726 packet_swab(d->spb, tcode);
2727 DBGMSG("Packet received from node"
2728 " %d ack=0x%02X spd=%d tcode=0x%X"
2729 " length=%d ctx=%d tlabel=%d",
2730 (d->spb[1]>>16)&0x3f,
2731 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2732 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2733 tcode, length, d->ctx,
2734 (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);
2736 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2737 == 0x11) ? 1 : 0;
2739 hpsb_packet_received(ohci->host, d->spb,
2740 length-4, ack);
2741 }
2742 #ifdef OHCI1394_DEBUG
2743 else
2744 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2745 d->ctx);
2746 #endif
2748 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2750 bytes_left = d->buf_size - rescount - offset;
2752 }
2754 d->buf_ind = idx;
2755 d->buf_offset = offset;
2757 spin_unlock_irqrestore(&d->lock, flags);
2758 }
2760 /* Bottom half that processes sent packets */
2761 static void dma_trm_tasklet (unsigned long data)
2762 {
2763 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2764 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2765 struct hpsb_packet *packet, *ptmp;
2766 unsigned long flags;
2767 u32 status, ack;
2768 size_t datasize;
2770 spin_lock_irqsave(&d->lock, flags);
2772 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2773 datasize = packet->data_size;
2774 if (datasize && packet->type != hpsb_raw)
2775 status = le32_to_cpu(
2776 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2777 else
2778 status = le32_to_cpu(
2779 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2781 if (status == 0)
2782 /* this packet hasn't been sent yet */
2783 break;
2785 #ifdef OHCI1394_DEBUG
2786 if (datasize)
2787 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2788 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2789 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2790 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2791 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2792 status&0x1f, (status>>5)&0x3,
2793 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2794 d->ctx);
2795 else
2796 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2797 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2798 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2799 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2800 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2801 status&0x1f, (status>>5)&0x3,
2802 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2803 d->ctx);
2804 else
2805 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2806 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2807 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2808 >>16)&0x3f,
2809 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2810 >>4)&0xf,
2811 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2812 >>10)&0x3f,
2813 status&0x1f, (status>>5)&0x3,
2814 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2815 d->ctx);
2816 #endif
2818 if (status & 0x10) {
2819 ack = status & 0xf;
2820 } else {
2821 switch (status & 0x1f) {
2822 case EVT_NO_STATUS: /* that should never happen */
2823 case EVT_RESERVED_A: /* that should never happen */
2824 case EVT_LONG_PACKET: /* that should never happen */
2825 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2826 ack = ACKX_SEND_ERROR;
2827 break;
2828 case EVT_MISSING_ACK:
2829 ack = ACKX_TIMEOUT;
2830 break;
2831 case EVT_UNDERRUN:
2832 ack = ACKX_SEND_ERROR;
2833 break;
2834 case EVT_OVERRUN: /* that should never happen */
2835 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2836 ack = ACKX_SEND_ERROR;
2837 break;
2838 case EVT_DESCRIPTOR_READ:
2839 case EVT_DATA_READ:
2840 case EVT_DATA_WRITE:
2841 ack = ACKX_SEND_ERROR;
2842 break;
2843 case EVT_BUS_RESET: /* that should never happen */
2844 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2845 ack = ACKX_SEND_ERROR;
2846 break;
2847 case EVT_TIMEOUT:
2848 ack = ACKX_TIMEOUT;
2849 break;
2850 case EVT_TCODE_ERR:
2851 ack = ACKX_SEND_ERROR;
2852 break;
2853 case EVT_RESERVED_B: /* that should never happen */
2854 case EVT_RESERVED_C: /* that should never happen */
2855 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2856 ack = ACKX_SEND_ERROR;
2857 break;
2858 case EVT_UNKNOWN:
2859 case EVT_FLUSHED:
2860 ack = ACKX_SEND_ERROR;
2861 break;
2862 default:
2863 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2864 ack = ACKX_SEND_ERROR;
2865 break;
2866 }
2867 }
2869 list_del_init(&packet->driver_list);
2870 hpsb_packet_sent(ohci->host, packet, ack);
2872 if (datasize) {
2873 pci_unmap_single(ohci->dev,
2874 le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
2875 datasize, PCI_DMA_TODEVICE);
2876 OHCI_DMA_FREE("single Xmit data packet");
2877 }
2879 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2880 d->free_prgs++;
2881 }
2883 dma_trm_flush(ohci, d);
2885 spin_unlock_irqrestore(&d->lock, flags);
2886 }
2888 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2889 {
2891 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2893 if (d->type == DMA_CTX_ISO) {
2894 /* disable interrupts */
2895 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2896 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2897 } else {
2898 tasklet_kill(&d->task);
2899 }
2900 }
2904 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2905 {
2906 int i;
2907 struct ti_ohci *ohci = d->ohci;
2909 if (ohci == NULL)
2910 return;
2912 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2914 if (d->buf_cpu) {
2915 for (i=0; i<d->num_desc; i++)
2916 if (d->buf_cpu[i] && d->buf_bus[i]) {
2917 pci_free_consistent(
2918 ohci->dev, d->buf_size,
2919 d->buf_cpu[i], d->buf_bus[i]);
2920 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2921 }
2922 kfree(d->buf_cpu);
2923 kfree(d->buf_bus);
2924 }
2925 if (d->prg_cpu) {
2926 for (i=0; i<d->num_desc; i++)
2927 if (d->prg_cpu[i] && d->prg_bus[i]) {
2928 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2929 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2930 }
2931 pci_pool_destroy(d->prg_pool);
2932 OHCI_DMA_FREE("dma_rcv prg pool");
2933 kfree(d->prg_cpu);
2934 kfree(d->prg_bus);
2935 }
2937 kfree(d->spb);
2938 /* Mark this context as freed. */
2939 d->ohci = NULL;
2940 }
2942 static int
2943 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2944 enum context_type type, int ctx, int num_desc,
2945 int buf_size, int split_buf_size, int context_base)
2946 {
2947 int i, len;
2948 static int num_allocs;
2949 static char pool_name[20];
2951 d->ohci = ohci;
2952 d->type = type;
2953 d->ctx = ctx;
2955 d->num_desc = num_desc;
2956 d->buf_size = buf_size;
2957 d->split_buf_size = split_buf_size;
2963 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2964 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2966 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968 free_dma_rcv_ctx(d);
2969 return -ENOMEM;
2970 }
2972 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2973 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2975 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2976 PRINT(KERN_ERR, "Failed to allocate dma prg");
2977 free_dma_rcv_ctx(d);
2978 return -ENOMEM;
2979 }
2981 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2983 if (d->spb == NULL) {
2984 PRINT(KERN_ERR, "Failed to allocate split buffer");
2985 free_dma_rcv_ctx(d);
2986 return -ENOMEM;
2987 }
2989 len = sprintf(pool_name, "ohci1394_rcv_prg");
2990 sprintf(pool_name+len, "%d", num_allocs);
2991 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2992 sizeof(struct dma_cmd), 4, 0);
2993 if (d->prg_pool == NULL) {
2995 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2996 free_dma_rcv_ctx(d);
2997 return -ENOMEM;
2998 }
2999 num_allocs++;
3001 OHCI_DMA_ALLOC("dma_rcv prg pool");
3003 for (i=0; i<d->num_desc; i++) {
3004 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3005 d->buf_size,
3006 d->buf_bus+i);
3007 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3009 if (d->buf_cpu[i] != NULL) {
3010 memset(d->buf_cpu[i], 0, d->buf_size);
3011 } else {
3012 PRINT(KERN_ERR,
3013 "Failed to allocate dma buffer");
3014 free_dma_rcv_ctx(d);
3015 return -ENOMEM;
3016 }
3018 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3019 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3021 if (d->prg_cpu[i] != NULL) {
3022 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3023 } else {
3024 PRINT(KERN_ERR,
3025 "Failed to allocate dma prg");
3026 free_dma_rcv_ctx(d);
3027 return -ENOMEM;
3028 }
3029 }
3031 spin_lock_init(&d->lock);
3033 if (type == DMA_CTX_ISO) {
3034 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3035 OHCI_ISO_MULTICHANNEL_RECEIVE,
3036 dma_rcv_tasklet, (unsigned long) d);
3037 } else {
3038 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3039 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3040 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3042 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3043 }
3045 return 0;
3046 }
3048 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3049 {
3050 int i;
3051 struct ti_ohci *ohci = d->ohci;
3053 if (ohci == NULL)
3054 return;
3056 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3058 if (d->prg_cpu) {
3059 for (i=0; i<d->num_desc; i++)
3060 if (d->prg_cpu[i] && d->prg_bus[i]) {
3061 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3062 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3063 }
3064 pci_pool_destroy(d->prg_pool);
3065 OHCI_DMA_FREE("dma_trm prg pool");
3066 kfree(d->prg_cpu);
3067 kfree(d->prg_bus);
3068 }
3070 /* Mark this context as freed. */
3071 d->ohci = NULL;
3072 }
3074 static int
3075 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3076 enum context_type type, int ctx, int num_desc,
3077 int context_base)
3078 {
3079 int i, len;
3080 static char pool_name[20];
3081 static int num_allocs=0;
3083 d->ohci = ohci;
3084 d->type = type;
3085 d->ctx = ctx;
3086 d->num_desc = num_desc;
3091 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3092 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3094 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3095 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3096 free_dma_trm_ctx(d);
3097 return -ENOMEM;
3098 }
3100 len = sprintf(pool_name, "ohci1394_trm_prg");
3101 sprintf(pool_name+len, "%d", num_allocs);
3102 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3103 sizeof(struct at_dma_prg), 4, 0);
3104 if (d->prg_pool == NULL) {
3105 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3106 free_dma_trm_ctx(d);
3107 return -ENOMEM;
3108 }
3109 num_allocs++;
3111 OHCI_DMA_ALLOC("dma_trm prg pool");
3113 for (i = 0; i < d->num_desc; i++) {
3114 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3115 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3117 if (d->prg_cpu[i] != NULL) {
3118 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3119 } else {
3120 PRINT(KERN_ERR,
3121 "Failed to allocate at dma prg");
3122 free_dma_trm_ctx(d);
3123 return -ENOMEM;
3124 }
3125 }
3127 spin_lock_init(&d->lock);
3129 /* initialize tasklet */
3130 if (type == DMA_CTX_ISO) {
3131 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3132 dma_trm_tasklet, (unsigned long) d);
3133 if (ohci1394_register_iso_tasklet(ohci,
3134 &ohci->it_legacy_tasklet) < 0) {
3135 PRINT(KERN_ERR, "No IT DMA context available");
3136 free_dma_trm_ctx(d);
3137 return -EBUSY;
3138 }
3140 /* IT can be assigned to any context by register_iso_tasklet */
3141 d->ctx = ohci->it_legacy_tasklet.context;
3142 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3143 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3144 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3145 } else {
3146 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3147 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3148 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3149 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3150 }
3152 return 0;
3153 }
3155 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3156 {
3157 struct ti_ohci *ohci = host->hostdata;
3159 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3160 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3162 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3163 }
3166 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3167 quadlet_t data, quadlet_t compare)
3168 {
3169 struct ti_ohci *ohci = host->hostdata;
3170 int i;
3172 reg_write(ohci, OHCI1394_CSRData, data);
3173 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3174 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
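/* Annotation: per the OHCI spec, CSRControl bits 1:0 select which
 * serial-bus CSR the hardware compare-swap operates on (0 =
 * BUS_MANAGER_ID, 1 = BANDWIDTH_AVAILABLE, 2 and 3 =
 * CHANNELS_AVAILABLE hi/lo); bit 31 (csrDone) is polled below until
 * the operation completes. */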
3176 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3177 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3178 break;
3180 mdelay(1);
3181 }
3183 return reg_read(ohci, OHCI1394_CSRData);
3184 }
3186 static struct hpsb_host_driver ohci1394_driver = {
3187 .owner = THIS_MODULE,
3188 .name = OHCI1394_DRIVER_NAME,
3189 .set_hw_config_rom = ohci_set_hw_config_rom,
3190 .transmit_packet = ohci_transmit,
3191 .devctl = ohci_devctl,
3192 .isoctl = ohci_isoctl,
3193 .hw_csr_reg = ohci_hw_csr_reg,
3196 /***********************************
3197 * PCI Driver Interface functions *
3198 ***********************************/
3200 #define FAIL(err, fmt, args...) \
3201 do { \
3202 PRINT_G(KERN_ERR, fmt , ## args); \
3203 ohci1394_pci_remove(dev); \
3204 return err; \
3205 } while (0)
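/* Annotation: probe() advances ohci->init_state after each resource it
 * acquires, and FAIL() funnels every error through
 * ohci1394_pci_remove(), whose switch falls through from the current
 * init_state downwards so exactly the acquired resources are undone.
 * A hypothetical failure sketch:
 *
 *   ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
 *   ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
 *   if (ohci->registers == NULL)
 *           FAIL(-ENXIO, "...");  // remove() releases the mem region
 */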
3207 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3208 const struct pci_device_id *ent)
3209 {
3210 struct hpsb_host *host;
3211 struct ti_ohci *ohci; /* shortcut to currently handled device */
3212 unsigned long ohci_base;
3214 if (pci_enable_device(dev))
3215 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3216 pci_set_master(dev);
3218 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3219 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3221 ohci = host->hostdata;
3222 ohci->dev = dev;
3223 ohci->host = host;
3224 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3226 pci_set_drvdata(dev, ohci);
3228 /* We don't want hardware swapping */
3229 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3231 /* Some oddball Apple controllers do not order the selfid
3232 * properly, so we make up for it here. */
3233 #ifndef __LITTLE_ENDIAN
3234 /* XXX: Need a better way to check this. I'm wondering if we can
3235 * read the values of the OHCI1394_PCI_HCI_Control and the
3236 * noByteSwapData registers to see if they were not cleared to
3237 * zero. Should this work? Obviously it's not defined what these
3238 * registers will read when they aren't supported. Bleh! */
3239 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3240 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3241 ohci->no_swap_incoming = 1;
3242 ohci->selfid_swap = 0;
3243 } else
3244 ohci->selfid_swap = 1;
3245 #endif
3248 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3249 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3250 #endif
3252 /* These chipsets require a bit of extra care when checking after
3253 * a busreset. */
3254 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3255 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3256 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3257 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3258 ohci->check_busreset = 1;
3260 /* We hardwire the MMIO length, since some CardBus adaptors
3261 * fail to report the right length. Anyway, the ohci spec
3262 * clearly says it's 2kb, so this shouldn't be a problem. */
3263 ohci_base = pci_resource_start(dev, 0);
3264 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3265 PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
3266 pci_resource_len(dev, 0));
3268 /* Seems PCMCIA handles this internally. Not sure why. Seems
3269 * pretty bogus to force a driver to special case this. */
3270 #ifndef PCMCIA
3271 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3272 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3273 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3274 #endif
3275 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3277 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3278 if (ohci->registers == NULL)
3279 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3280 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3281 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3283 /* csr_config rom allocation */
3284 ohci->csr_config_rom_cpu =
3285 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3286 &ohci->csr_config_rom_bus);
3287 OHCI_DMA_ALLOC("consistent csr_config_rom");
3288 if (ohci->csr_config_rom_cpu == NULL)
3289 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3290 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3292 /* self-id dma buffer allocation */
3293 ohci->selfid_buf_cpu =
3294 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3295 &ohci->selfid_buf_bus);
3296 OHCI_DMA_ALLOC("consistent selfid_buf");
3298 if (ohci->selfid_buf_cpu == NULL)
3299 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3300 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3302 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3303 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3304 "8Kb boundary... may cause problems on some CXD3222 chip",
3305 ohci->selfid_buf_cpu);
3307 /* No self-id errors at startup */
3308 ohci->self_id_errors = 0;
3310 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3311 /* AR DMA request context allocation */
3312 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3313 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3314 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3315 OHCI1394_AsReqRcvContextBase) < 0)
3316 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3318 /* AR DMA response context allocation */
3319 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3320 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3321 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3322 OHCI1394_AsRspRcvContextBase) < 0)
3323 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3325 /* AT DMA request context */
3326 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3327 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3328 OHCI1394_AsReqTrContextBase) < 0)
3329 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3331 /* AT DMA response context */
3332 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3333 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3334 OHCI1394_AsRspTrContextBase) < 0)
3335 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3337 /* Start off with a soft reset, to clear everything to a sane
3338 * state. */
3339 ohci_soft_reset(ohci);
3341 /* Now enable LPS, which we need in order to start accessing
3342 * most of the registers. In fact, on some cards (ALI M5251),
3343 * accessing registers in the SClk domain without LPS enabled
3344 * will lock up the machine. Wait 50msec to make sure we have
3345 * full link enabled. */
3346 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3347 mdelay(50);
3348 /* Disable and clear interrupts */
3349 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3350 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3354 /* Determine the number of available IR and IT contexts. */
3355 ohci->nb_iso_rcv_ctx =
3356 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3357 ohci->nb_iso_xmit_ctx =
3358 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3360 /* Set the usage bits for non-existent contexts so they can't
3361 * be allocated. */
3362 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3363 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
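/* Annotation: with e.g. nb_iso_rcv_ctx = 4, ~0 << 4 = 0xfffffff0, so
 * bits 0-3 stay free for real IR contexts while bits 4-31 read as
 * permanently "in use" to ohci1394_register_iso_tasklet(). */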
3365 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3366 spin_lock_init(&ohci->iso_tasklet_list_lock);
3367 ohci->ISO_channel_usage = 0;
3368 spin_lock_init(&ohci->IR_channel_lock);
3370 /* Allocate the IR DMA context right here so we don't have
3371 * to do it in interrupt path - note that this doesn't
3372 * waste much memory and avoids the jugglery required to
3373 * allocate it in IRQ path. */
3374 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3375 DMA_CTX_ISO, 0, IR_NUM_DESC,
3376 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3377 OHCI1394_IsoRcvContextBase) < 0) {
3378 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3379 }
3381 /* We hopefully don't have to pre-allocate IT DMA like we did
3382 * for IR DMA above. Allocate it on-demand and mark inactive. */
3383 ohci->it_legacy_context.ohci = NULL;
3384 spin_lock_init(&ohci->event_lock);
3386 /*
3387 * interrupts are disabled, all right, but... due to SA_SHIRQ we
3388 * might get called anyway. We'll see no event, of course, but
3389 * we need to get to that "no event", so enough should be initialized
3390 * by that point.
3391 */
3392 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3393 OHCI1394_DRIVER_NAME, ohci))
3394 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3396 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3397 ohci_initialize(ohci);
3399 /* Set certain csr values */
3400 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3401 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3402 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3403 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3404 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3406 /* Tell the highlevel this host is ready */
3407 if (hpsb_add_host(host))
3408 FAIL(-ENOMEM, "Failed to register host with highlevel");
3410 ohci->init_state = OHCI_INIT_DONE;
3412 return 0;
3413 #undef FAIL
3414 }
3416 static void ohci1394_pci_remove(struct pci_dev *pdev)
3417 {
3418 struct ti_ohci *ohci;
3419 struct device *dev;
3421 ohci = pci_get_drvdata(pdev);
3422 if (!ohci)
3423 return;
3425 dev = get_device(&ohci->host->device);
3427 switch (ohci->init_state) {
3428 case OHCI_INIT_DONE:
3429 hpsb_remove_host(ohci->host);
3431 /* Clear out BUS Options */
3432 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3433 reg_write(ohci, OHCI1394_BusOptions,
3434 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3435 0x00ff0000);
3436 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3438 case OHCI_INIT_HAVE_IRQ:
3439 /* Clear interrupt registers */
3440 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3441 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3442 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3443 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3444 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3445 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3447 /* Disable IRM Contender */
3448 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3450 /* Clear link control register */
3451 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3453 /* Let all other nodes know to ignore us */
3454 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3456 /* Soft reset before we start - this disables
3457 * interrupts and clears linkEnable and LPS. */
3458 ohci_soft_reset(ohci);
3459 free_irq(ohci->dev->irq, ohci);
3461 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3462 /* The ohci_soft_reset() stops all DMA contexts, so we
3463 * don't need to do this. */
3464 /* Free AR dma */
3465 free_dma_rcv_ctx(&ohci->ar_req_context);
3466 free_dma_rcv_ctx(&ohci->ar_resp_context);
3468 /* Free AT dma */
3469 free_dma_trm_ctx(&ohci->at_req_context);
3470 free_dma_trm_ctx(&ohci->at_resp_context);
3472 /* Free IR dma */
3473 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3475 /* Free IT dma */
3476 free_dma_trm_ctx(&ohci->it_legacy_context);
3478 /* Free IR legacy dma */
3479 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3482 case OHCI_INIT_HAVE_SELFID_BUFFER:
3483 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3484 ohci->selfid_buf_cpu,
3485 ohci->selfid_buf_bus);
3486 OHCI_DMA_FREE("consistent selfid_buf");
3488 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3489 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3490 ohci->csr_config_rom_cpu,
3491 ohci->csr_config_rom_bus);
3492 OHCI_DMA_FREE("consistent csr_config_rom");
3494 case OHCI_INIT_HAVE_IOMAPPING:
3495 iounmap(ohci->registers);
3497 case OHCI_INIT_HAVE_MEM_REGION:
3498 #ifndef PCMCIA
3499 release_mem_region(pci_resource_start(ohci->dev, 0),
3500 OHCI1394_REGISTER_SIZE);
3501 #endif
3503 #ifdef CONFIG_PPC_PMAC
3504 /* On UniNorth, power down the cable and turn off the chip
3505 * clock when the module is removed to save power on
3506 * laptops. Turning it back ON is done by the arch code when
3507 * pci_enable_device() is called */
3509 struct device_node* of_node;
3511 of_node = pci_device_to_OF_node(ohci->dev);
3512 if (of_node) {
3513 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3514 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3515 }
3516 }
3517 #endif /* CONFIG_PPC_PMAC */
3519 case OHCI_INIT_ALLOC_HOST:
3520 pci_set_drvdata(ohci->dev, NULL);
3521 }
3523 if (dev)
3524 put_device(dev);
3525 }
3528 static int ohci1394_pci_resume (struct pci_dev *pdev)
3529 {
3530 #ifdef CONFIG_PPC_PMAC
3531 if (_machine == _MACH_Pmac) {
3532 struct device_node *of_node;
3534 /* Re-enable 1394 */
3535 of_node = pci_device_to_OF_node (pdev);
3536 if (of_node)
3537 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3538 }
3539 #endif /* CONFIG_PPC_PMAC */
3541 pci_enable_device(pdev);
3543 return 0;
3544 }
3547 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3548 {
3549 #ifdef CONFIG_PPC_PMAC
3550 if (_machine == _MACH_Pmac) {
3551 struct device_node *of_node;
3553 /* Disable 1394 */
3554 of_node = pci_device_to_OF_node (pdev);
3555 if (of_node)
3556 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3557 }
3558 #endif /* CONFIG_PPC_PMAC */
3560 return 0;
3561 }
3564 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3566 static struct pci_device_id ohci1394_pci_tbl[] = {
3567 {
3568 .class = PCI_CLASS_FIREWIRE_OHCI,
3569 .class_mask = PCI_ANY_ID,
3570 .vendor = PCI_ANY_ID,
3571 .device = PCI_ANY_ID,
3572 .subvendor = PCI_ANY_ID,
3573 .subdevice = PCI_ANY_ID,
3574 },
3575 { 0, },
3576 };
3578 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3580 static struct pci_driver ohci1394_pci_driver = {
3581 .name = OHCI1394_DRIVER_NAME,
3582 .id_table = ohci1394_pci_tbl,
3583 .probe = ohci1394_pci_probe,
3584 .remove = ohci1394_pci_remove,
3585 .resume = ohci1394_pci_resume,
3586 .suspend = ohci1394_pci_suspend,
3587 };
3589 /***********************************
3590 * OHCI1394 Video Interface *
3591 ***********************************/
3593 /* Essentially the only purpose of this code is to allow another
3594 * module to hook into ohci's interrupt handler */
3596 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3597 {
3598 int i=0;
3600 /* stop the channel program if it's still running */
3601 reg_write(ohci, reg, 0x8000);
3603 /* Wait until it effectively stops */
3604 while (reg_read(ohci, reg) & 0x400) {
3605 i++;
3606 if (i>5000) {
3607 PRINT(KERN_ERR,
3608 "Runaway loop while stopping context: %s...", msg ? msg : "");
3609 return 1;
3610 }
3612 mb();
3613 udelay(10);
3614 }
3615 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3616 return 0;
3617 }
3619 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3620 void (*func)(unsigned long), unsigned long data)
3621 {
3622 tasklet_init(&tasklet->tasklet, func, data);
3623 tasklet->type = type;
3624 /* We init the tasklet->link field, so we can list_del() it
3625 * without worrying whether it was added to the list or not. */
3626 INIT_LIST_HEAD(&tasklet->link);
3627 }
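/* Minimal usage sketch for a client module (annotation; names like
 * my_it_tasklet, my_it_func and my_driver_data are hypothetical, not
 * from this file):
 *
 *   static struct ohci1394_iso_tasklet my_it_tasklet;
 *
 *   ohci1394_init_iso_tasklet(&my_it_tasklet, OHCI_ISO_TRANSMIT,
 *                             my_it_func, (unsigned long)my_driver_data);
 *   if (ohci1394_register_iso_tasklet(ohci, &my_it_tasklet) < 0)
 *           return -EBUSY;  // no free IT context
 *   ...
 *   ohci1394_unregister_iso_tasklet(ohci, &my_it_tasklet);
 *
 * register assigns a free hardware context in my_it_tasklet.context;
 * the tasklet then runs whenever the interrupt handler sees that
 * context's bit set in the iso xmit event register. */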
3629 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3630 struct ohci1394_iso_tasklet *tasklet)
3631 {
3632 unsigned long flags, *usage;
3633 int n, i, r = -EBUSY;
3635 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3636 n = ohci->nb_iso_xmit_ctx;
3637 usage = &ohci->it_ctx_usage;
3638 }
3639 else {
3640 n = ohci->nb_iso_rcv_ctx;
3641 usage = &ohci->ir_ctx_usage;
3643 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3644 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3645 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3646 return r;
3647 }
3648 }
3649 }
3651 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3653 for (i = 0; i < n; i++)
3654 if (!test_and_set_bit(i, usage)) {
3655 tasklet->context = i;
3656 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3657 r = 0;
3658 break;
3659 }
3661 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3663 return r;
3664 }
3666 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3667 struct ohci1394_iso_tasklet *tasklet)
3668 {
3669 unsigned long flags;
3671 tasklet_kill(&tasklet->tasklet);
3673 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3675 if (tasklet->type == OHCI_ISO_TRANSMIT)
3676 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3677 else {
3678 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3680 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3681 clear_bit(0, &ohci->ir_multichannel_used);
3682 }
3683 }
3685 list_del(&tasklet->link);
3687 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3688 }
3690 EXPORT_SYMBOL(ohci1394_stop_context);
3691 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3692 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3693 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3695 /***********************************
3696 * General module initialization *
3697 ***********************************/
3699 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3700 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3701 MODULE_LICENSE("GPL");
3703 static void __exit ohci1394_cleanup (void)
3704 {
3705 pci_unregister_driver(&ohci1394_pci_driver);
3706 }
3708 static int __init ohci1394_init(void)
3709 {
3710 return pci_register_driver(&ohci1394_pci_driver);
3711 }
3713 module_init(ohci1394_init);
3714 module_exit(ohci1394_cleanup);