1 /*                                              -*- c-basic-offset: 8 -*-
2  *
3  * fw-ohci.c - Driver for OHCI 1394 boards
4  * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/poll.h>
28 #include <linux/dma-mapping.h>
29
30 #include <asm/uaccess.h>
31 #include <asm/semaphore.h>
32
33 #include "fw-transaction.h"
34 #include "fw-ohci.h"
35
36 #define descriptor_output_more          0
37 #define descriptor_output_last          (1 << 12)
38 #define descriptor_input_more           (2 << 12)
39 #define descriptor_input_last           (3 << 12)
40 #define descriptor_status               (1 << 11)
41 #define descriptor_key_immediate        (2 << 8)
42 #define descriptor_ping                 (1 << 7)
43 #define descriptor_yy                   (1 << 6)
44 #define descriptor_no_irq               (0 << 4)
45 #define descriptor_irq_error            (1 << 4)
46 #define descriptor_irq_always           (3 << 4)
47 #define descriptor_branch_always        (3 << 2)
48 #define descriptor_wait                 (3 << 0)
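
/* These bits populate the 16-bit control word of a DMA descriptor.  As
 * an illustrative sketch (not a constant the driver defines), the
 * control word of a typical AT "output last" descriptor is composed as
 *
 *        control = descriptor_output_last |
 *                  descriptor_irq_always |
 *                  descriptor_branch_always;
 *
 * which works out to (1 << 12) | (3 << 4) | (3 << 2) = 0x103c. */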
49
50 struct descriptor {
51         __le16 req_count;
52         __le16 control;
53         __le32 data_address;
54         __le32 branch_address;
55         __le16 res_count;
56         __le16 transfer_status;
57 } __attribute__((aligned(16)));
58
59 struct db_descriptor {
60         __le16 first_size;
61         __le16 control;
62         __le16 second_req_count;
63         __le16 first_req_count;
64         __le32 branch_address;
65         __le16 second_res_count;
66         __le16 first_res_count;
67         __le32 reserved0;
68         __le32 first_buffer;
69         __le32 second_buffer;
70         __le32 reserved1;
71 } __attribute__((aligned(16)));
72
73 #define control_set(regs)       (regs)
74 #define control_clear(regs)     ((regs) + 4)
75 #define command_ptr(regs)       ((regs) + 12)
76 #define context_match(regs)     ((regs) + 16)
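
/* Each DMA context exposes a small register block; the macros above
 * compute the addresses of its parts.  As a worked example, assuming
 * the AT request context base offset of 0x180 from the OHCI register
 * map: control_set(0x180) = 0x180, control_clear(0x180) = 0x184,
 * command_ptr(0x180) = 0x18c and context_match(0x180) = 0x190 (the
 * match register only exists for isochronous receive contexts). */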
77
78 struct ar_buffer {
79         struct descriptor descriptor;
80         struct ar_buffer *next;
81         __le32 data[0];
82 };
83
84 struct ar_context {
85         struct fw_ohci *ohci;
86         struct ar_buffer *current_buffer;
87         struct ar_buffer *last_buffer;
88         void *pointer;
89         u32 regs;
90         struct tasklet_struct tasklet;
91 };
92
93 struct context;
94
95 typedef int (*descriptor_callback_t)(struct context *ctx,
96                                      struct descriptor *d,
97                                      struct descriptor *last);
98 struct context {
99         struct fw_ohci *ohci;
100         u32 regs;
101
102         struct descriptor *buffer;
103         dma_addr_t buffer_bus;
104         size_t buffer_size;
105         struct descriptor *head_descriptor;
106         struct descriptor *tail_descriptor;
107         struct descriptor *tail_descriptor_last;
108         struct descriptor *prev_descriptor;
109
110         descriptor_callback_t callback;
111
112         struct tasklet_struct tasklet;
113 };
114
115 #define it_header_sy(v)          ((v) <<  0)
116 #define it_header_tcode(v)       ((v) <<  4)
117 #define it_header_channel(v)     ((v) <<  8)
118 #define it_header_tag(v)         ((v) << 14)
119 #define it_header_speed(v)       ((v) << 16)
120 #define it_header_data_length(v) ((v) << 16)
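
/* A minimal sketch of how these shifts build the two header quadlets
 * of an isochronous transmit packet, assuming channel 5, tag 1, sy 0,
 * speed code 2 (S400) and a 64-byte payload:
 *
 *        header[0] = it_header_sy(0) | it_header_tcode(TCODE_STREAM_DATA) |
 *                    it_header_channel(5) | it_header_tag(1) |
 *                    it_header_speed(2);
 *        header[1] = it_header_data_length(64);
 *
 * speed and data_length share shift 16 because speed lives in the
 * first quadlet and data_length in the second. */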
121
122 struct iso_context {
123         struct fw_iso_context base;
124         struct context context;
125         void *header;
126         size_t header_length;
127 };
128
129 #define CONFIG_ROM_SIZE 1024
130
131 struct fw_ohci {
132         struct fw_card card;
133
134         u32 version;
135         __iomem char *registers;
136         dma_addr_t self_id_bus;
137         __le32 *self_id_cpu;
138         struct tasklet_struct bus_reset_tasklet;
139         int node_id;
140         int generation;
141         int request_generation;
142
143         /* Spinlock for accessing fw_ohci data.  Never call out of
144          * this driver with this lock held. */
145         spinlock_t lock;
146         u32 self_id_buffer[512];
147
148         /* Config rom buffers */
149         __be32 *config_rom;
150         dma_addr_t config_rom_bus;
151         __be32 *next_config_rom;
152         dma_addr_t next_config_rom_bus;
153         u32 next_header;
154
155         struct ar_context ar_request_ctx;
156         struct ar_context ar_response_ctx;
157         struct context at_request_ctx;
158         struct context at_response_ctx;
159
160         u32 it_context_mask;
161         struct iso_context *it_context_list;
162         u32 ir_context_mask;
163         struct iso_context *ir_context_list;
164 };
165
166 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
167 {
168         return container_of(card, struct fw_ohci, card);
169 }
170
171 #define IT_CONTEXT_CYCLE_MATCH_ENABLE   0x80000000
172 #define IR_CONTEXT_BUFFER_FILL          0x80000000
173 #define IR_CONTEXT_ISOCH_HEADER         0x40000000
174 #define IR_CONTEXT_CYCLE_MATCH_ENABLE   0x20000000
175 #define IR_CONTEXT_MULTI_CHANNEL_MODE   0x10000000
176 #define IR_CONTEXT_DUAL_BUFFER_MODE     0x08000000
177
178 #define CONTEXT_RUN     0x8000
179 #define CONTEXT_WAKE    0x1000
180 #define CONTEXT_DEAD    0x0800
181 #define CONTEXT_ACTIVE  0x0400
182
183 #define OHCI1394_MAX_AT_REQ_RETRIES     0x2
184 #define OHCI1394_MAX_AT_RESP_RETRIES    0x2
185 #define OHCI1394_MAX_PHYS_RESP_RETRIES  0x8
186
187 #define FW_OHCI_MAJOR                   240
188 #define OHCI1394_REGISTER_SIZE          0x800
189 #define OHCI_LOOP_COUNT                 500
190 #define OHCI1394_PCI_HCI_Control        0x40
191 #define SELF_ID_BUF_SIZE                0x800
192 #define OHCI_TCODE_PHY_PACKET           0x0e
193 #define OHCI_VERSION_1_1                0x010010
194 #define ISO_BUFFER_SIZE                 (64 * 1024)
195 #define AT_BUFFER_SIZE                  4096
196
197 static char ohci_driver_name[] = KBUILD_MODNAME;
198
199 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
200 {
201         writel(data, ohci->registers + offset);
202 }
203
204 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
205 {
206         return readl(ohci->registers + offset);
207 }
208
209 static inline void flush_writes(const struct fw_ohci *ohci)
210 {
211         /* Do a dummy read to flush writes. */
212         reg_read(ohci, OHCI1394_Version);
213 }
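
/* PCI bridges may post (buffer) MMIO writes, so a write is only
 * guaranteed to have reached the controller after a read from the same
 * device.  The usual pattern in this driver is therefore
 *
 *        reg_write(ohci, control_set(ctx->regs), CONTEXT_WAKE);
 *        flush_writes(ohci);
 */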
214
215 static int
216 ohci_update_phy_reg(struct fw_card *card, int addr,
217                     int clear_bits, int set_bits)
218 {
219         struct fw_ohci *ohci = fw_ohci(card);
220         u32 val, old;
221
222         reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
223         msleep(2);
224         val = reg_read(ohci, OHCI1394_PhyControl);
225         if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
226                 fw_error("failed to set phy reg bits.\n");
227                 return -EBUSY;
228         }
229
230         old = OHCI1394_PhyControl_ReadData(val);
231         old = (old & ~clear_bits) | set_bits;
232         reg_write(ohci, OHCI1394_PhyControl,
233                   OHCI1394_PhyControl_Write(addr, old));
234
235         return 0;
236 }
237
238 static int ar_context_add_page(struct ar_context *ctx)
239 {
240         struct device *dev = ctx->ohci->card.device;
241         struct ar_buffer *ab;
242         dma_addr_t ab_bus;
243         size_t offset;
244
245         ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
246         if (ab == NULL)
247                 return -ENOMEM;
248
249         ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
250         if (dma_mapping_error(ab_bus)) {
251                 free_page((unsigned long) ab);
252                 return -ENOMEM;
253         }
254
255         memset(&ab->descriptor, 0, sizeof ab->descriptor);
256         ab->descriptor.control        = cpu_to_le16(descriptor_input_more |
257                                                     descriptor_status |
258                                                     descriptor_branch_always);
259         offset = offsetof(struct ar_buffer, data);
260         ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
261         ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
262         ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
263         ab->descriptor.branch_address = 0;
264
265         dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
266
267         ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
268         ctx->last_buffer->next = ab;
269         ctx->last_buffer = ab;
270
271         reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
272         flush_writes(ctx->ohci);
273
274         return 0;
275 }
276
277 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
278 {
279         struct fw_ohci *ohci = ctx->ohci;
280         struct fw_packet p;
281         u32 status, length, tcode;
282
283         p.header[0] = le32_to_cpu(buffer[0]);
284         p.header[1] = le32_to_cpu(buffer[1]);
285         p.header[2] = le32_to_cpu(buffer[2]);
286
287         tcode = (p.header[0] >> 4) & 0x0f;
288         switch (tcode) {
289         case TCODE_WRITE_QUADLET_REQUEST:
290         case TCODE_READ_QUADLET_RESPONSE:
291                 p.header[3] = (__force __u32) buffer[3];
292                 p.header_length = 16;
293                 p.payload_length = 0;
294                 break;
295
296         case TCODE_READ_BLOCK_REQUEST:
297                 p.header[3] = le32_to_cpu(buffer[3]);
298                 p.header_length = 16;
299                 p.payload_length = 0;
300                 break;
301
302         case TCODE_WRITE_BLOCK_REQUEST:
303         case TCODE_READ_BLOCK_RESPONSE:
304         case TCODE_LOCK_REQUEST:
305         case TCODE_LOCK_RESPONSE:
306                 p.header[3] = le32_to_cpu(buffer[3]);
307                 p.header_length = 16;
308                 p.payload_length = p.header[3] >> 16;
309                 break;
310
311         case TCODE_WRITE_RESPONSE:
312         case TCODE_READ_QUADLET_REQUEST:
313         case OHCI_TCODE_PHY_PACKET:
314                 p.header_length = 12;
315                 p.payload_length = 0;
316                 break;
317         }
318
319         p.payload = (void *) buffer + p.header_length;
320
321         /* FIXME: What to do about evt_* errors? */
322         length = (p.header_length + p.payload_length + 3) / 4;
323         status = le32_to_cpu(buffer[length]);
324
325         p.ack        = ((status >> 16) & 0x1f) - 16;
326         p.speed      = (status >> 21) & 0x7;
327         p.timestamp  = status & 0xffff;
328         p.generation = ohci->request_generation;
329
330         /* The OHCI bus reset handler synthesizes a phy packet with
331          * the new generation number when a bus reset happens (see
332          * section 8.4.2.3).  This helps us determine when a request
333          * was received and make sure we send the response in the same
334          * generation.  We only need this for requests; for responses
335          * we use the unique tlabel for finding the matching
336          * request. */
337
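        /* p.ack + 16 undoes the subtraction above and recovers the raw
         * event code from the status word; 0x09 is evt_bus_reset, the
         * code used for the synthesized phy packet described above. */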
338         if (p.ack + 16 == 0x09)
339                 ohci->request_generation = (buffer[2] >> 16) & 0xff;
340         else if (ctx == &ohci->ar_request_ctx)
341                 fw_core_handle_request(&ohci->card, &p);
342         else
343                 fw_core_handle_response(&ohci->card, &p);
344
345         return buffer + length + 1;
346 }
347
348 static void ar_context_tasklet(unsigned long data)
349 {
350         struct ar_context *ctx = (struct ar_context *)data;
351         struct fw_ohci *ohci = ctx->ohci;
352         struct ar_buffer *ab;
353         struct descriptor *d;
354         void *buffer, *end;
355
356         ab = ctx->current_buffer;
357         d = &ab->descriptor;
358
359         if (d->res_count == 0) {
360                 size_t size, rest, offset;
361
362                 /* This descriptor is finished and we may have a
363                  * packet split across this and the next buffer. We
364                  * reuse the page for reassembling the split packet. */
365
366                 offset = offsetof(struct ar_buffer, data);
367                 dma_unmap_single(ohci->card.device,
368                                  ab->descriptor.data_address - offset,
369                                  PAGE_SIZE, DMA_BIDIRECTIONAL);
370
371                 buffer = ab;
372                 ab = ab->next;
373                 d = &ab->descriptor;
374                 size = buffer + PAGE_SIZE - ctx->pointer;
375                 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
376                 memmove(buffer, ctx->pointer, size);
377                 memcpy(buffer + size, ab->data, rest);
378                 ctx->current_buffer = ab;
379                 ctx->pointer = (void *) ab->data + rest;
380                 end = buffer + size + rest;
381
382                 while (buffer < end)
383                         buffer = handle_ar_packet(ctx, buffer);
384
385                 free_page((unsigned long)buffer);
386                 ar_context_add_page(ctx);
387         } else {
388                 buffer = ctx->pointer;
389                 ctx->pointer = end =
390                         (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
391
392                 while (buffer < end)
393                         buffer = handle_ar_packet(ctx, buffer);
394         }
395 }
396
397 static int
398 ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
399 {
400         struct ar_buffer ab;
401
402         ctx->regs        = regs;
403         ctx->ohci        = ohci;
404         ctx->last_buffer = &ab;
405         tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
406
407         ar_context_add_page(ctx);
408         ar_context_add_page(ctx);
409         ctx->current_buffer = ab.next;
410         ctx->pointer = ctx->current_buffer->data;
411
412         reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
413         reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
414         flush_writes(ctx->ohci);
415
416         return 0;
417 }
418
419 static void context_tasklet(unsigned long data)
420 {
421         struct context *ctx = (struct context *) data;
422         struct fw_ohci *ohci = ctx->ohci;
423         struct descriptor *d, *last;
424         u32 address;
425         int z;
426
427         dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
428                                 ctx->buffer_size, DMA_TO_DEVICE);
429
430         d    = ctx->tail_descriptor;
431         last = ctx->tail_descriptor_last;
432
433         while (last->branch_address != 0) {
434                 address = le32_to_cpu(last->branch_address);
435                 z = address & 0xf;
436                 d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
437                 last = (z == 2) ? d : d + z - 1;
438
439                 if (!ctx->callback(ctx, d, last))
440                         break;
441
442                 ctx->tail_descriptor      = d;
443                 ctx->tail_descriptor_last = last;
444         }
445 }
446
447 static int
448 context_init(struct context *ctx, struct fw_ohci *ohci,
449              size_t buffer_size, u32 regs,
450              descriptor_callback_t callback)
451 {
452         ctx->ohci = ohci;
453         ctx->regs = regs;
454         ctx->buffer_size = buffer_size;
455         ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
456         if (ctx->buffer == NULL)
457                 return -ENOMEM;
458
459         tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
460         ctx->callback = callback;
461
462         ctx->buffer_bus =
463                 dma_map_single(ohci->card.device, ctx->buffer,
464                                buffer_size, DMA_TO_DEVICE);
465         if (dma_mapping_error(ctx->buffer_bus)) {
466                 kfree(ctx->buffer);
467                 return -ENOMEM;
468         }
469
470         ctx->head_descriptor      = ctx->buffer;
471         ctx->prev_descriptor      = ctx->buffer;
472         ctx->tail_descriptor      = ctx->buffer;
473         ctx->tail_descriptor_last = ctx->buffer;
474
475         /* We put a dummy descriptor in the buffer that has a NULL
476          * branch address and looks like it's been sent.  That way we
477          * have a descriptor to append DMA programs to.  Also, the
478          * ring buffer invariant is that it always has at least one
479          * element so that head == tail means buffer full. */
480
481         memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
482         ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
483         ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
484         ctx->head_descriptor++;
485
486         return 0;
487 }
488
489 static void
490 context_release(struct context *ctx)
491 {
492         struct fw_card *card = &ctx->ohci->card;
493
494         dma_unmap_single(card->device, ctx->buffer_bus,
495                          ctx->buffer_size, DMA_TO_DEVICE);
496         kfree(ctx->buffer);
497 }
498
499 static struct descriptor *
500 context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
501 {
502         struct descriptor *d, *tail, *end;
503
504         d = ctx->head_descriptor;
505         tail = ctx->tail_descriptor;
506         end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor);
507
508         if (d + z <= tail) {
509                 goto has_space;
510         } else if (d > tail && d + z <= end) {
511                 goto has_space;
512         } else if (d > tail && ctx->buffer + z <= tail) {
513                 d = ctx->buffer;
514                 goto has_space;
515         }
516
517         return NULL;
518
519  has_space:
520         memset(d, 0, z * sizeof *d);
521         *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
522
523         return d;
524 }
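
/* The descriptor buffer is used as a ring; a sketch of the three cases
 * above, with '=' marking the live DMA program between tail t and head
 * d, and '.' marking free space:
 *
 *        | ==d......t== |   d + z <= tail       allocate at d
 *        | ..t======d.. |   d + z <= end        allocate at d
 *        | ..t======d.. |   buffer + z <= tail  wrap, allocate at buffer
 *
 * If no case leaves room for z descriptors, the ring is full and the
 * caller gets NULL. */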
525
526 static void context_run(struct context *ctx, u32 extra)
527 {
528         struct fw_ohci *ohci = ctx->ohci;
529
530         reg_write(ohci, command_ptr(ctx->regs),
531                   le32_to_cpu(ctx->tail_descriptor_last->branch_address));
532         reg_write(ohci, control_clear(ctx->regs), ~0);
533         reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
534         flush_writes(ohci);
535 }
536
537 static void context_append(struct context *ctx,
538                            struct descriptor *d, int z, int extra)
539 {
540         dma_addr_t d_bus;
541
542         d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;
543
544         ctx->head_descriptor = d + z + extra;
545         ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
546         ctx->prev_descriptor = z == 2 ? d : d + z - 1;
547
548         dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
549                                    ctx->buffer_size, DMA_TO_DEVICE);
550
551         reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
552         flush_writes(ctx->ohci);
553 }
554
555 static void context_stop(struct context *ctx)
556 {
557         u32 reg;
558         int i;
559
560         reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
561         flush_writes(ctx->ohci);
562
563         for (i = 0; i < 10; i++) {
564                 reg = reg_read(ctx->ohci, control_set(ctx->regs));
565                 if ((reg & CONTEXT_ACTIVE) == 0)
566                         break;
567
568                 fw_notify("context_stop: still active (0x%08x)\n", reg);
569                 msleep(1);
570         }
571 }
572
573 struct driver_data {
574         struct fw_packet *packet;
575 };
576
577 /* This function appends a packet to the DMA queue for transmission.
578  * Must always be called with the ohci->lock held to ensure proper
579  * generation handling and locking around packet queue manipulation. */
580 static int
581 at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
582 {
583         struct fw_ohci *ohci = ctx->ohci;
584         dma_addr_t d_bus, payload_bus;
585         struct driver_data *driver_data;
586         struct descriptor *d, *last;
587         __le32 *header;
588         int z, tcode;
589         u32 reg;
590
591         d = context_get_descriptors(ctx, 4, &d_bus);
592         if (d == NULL) {
593                 packet->ack = RCODE_SEND_ERROR;
594                 return -1;
595         }
596
597         d[0].control   = cpu_to_le16(descriptor_key_immediate);
598         d[0].res_count = cpu_to_le16(packet->timestamp);
599
600         /* The DMA format for asynchronous link packets is different
601          * from the IEEE1394 layout, so shift the fields around
602          * accordingly.  If header_length is 8, it's a PHY packet, to
603          * which we need to prepend an extra quadlet. */
604
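        /* A sketch of the shuffle for a request packet: IEEE 1394 puts
         * destination_ID in the top half of quadlet 0 and source_ID in
         * the top half of quadlet 1, whereas the OHCI AT format wants
         * the speed code in quadlet 0 and destination_ID in quadlet 1.
         * source_ID is dropped; the controller inserts its own node ID
         * on transmission. */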
605         header = (__le32 *) &d[1];
606         if (packet->header_length > 8) {
607                 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
608                                         (packet->speed << 16));
609                 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
610                                         (packet->header[0] & 0xffff0000));
611                 header[2] = cpu_to_le32(packet->header[2]);
612
613                 tcode = (packet->header[0] >> 4) & 0x0f;
614                 if (TCODE_IS_BLOCK_PACKET(tcode))
615                         header[3] = cpu_to_le32(packet->header[3]);
616                 else
617                         header[3] = (__force __le32) packet->header[3];
618
619                 d[0].req_count = cpu_to_le16(packet->header_length);
620         } else {
621                 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
622                                         (packet->speed << 16));
623                 header[1] = cpu_to_le32(packet->header[0]);
624                 header[2] = cpu_to_le32(packet->header[1]);
625                 d[0].req_count = cpu_to_le16(12);
626         }
627
628         driver_data = (struct driver_data *) &d[3];
629         driver_data->packet = packet;
630
631         if (packet->payload_length > 0) {
632                 payload_bus =
633                         dma_map_single(ohci->card.device, packet->payload,
634                                        packet->payload_length, DMA_TO_DEVICE);
635                 if (dma_mapping_error(payload_bus)) {
636                         packet->ack = RCODE_SEND_ERROR;
637                         return -1;
638                 }
639
640                 d[2].req_count    = cpu_to_le16(packet->payload_length);
641                 d[2].data_address = cpu_to_le32(payload_bus);
642                 last = &d[2];
643                 z = 3;
644         } else {
645                 last = &d[0];
646                 z = 2;
647         }
648
649         last->control |= cpu_to_le16(descriptor_output_last |
650                                      descriptor_irq_always |
651                                      descriptor_branch_always);
652
653         /* FIXME: Document how the locking works. */
654         if (ohci->generation != packet->generation) {
655                 packet->ack = RCODE_GENERATION;
656                 return -1;
657         }
658
659         context_append(ctx, d, z, 4 - z);
660
661         /* If the context isn't already running, start it up. */
662         reg = reg_read(ctx->ohci, control_set(ctx->regs));
663         if ((reg & CONTEXT_ACTIVE) == 0)
664                 context_run(ctx, 0);
665
666         return 0;
667 }
668
669 static int handle_at_packet(struct context *context,
670                             struct descriptor *d,
671                             struct descriptor *last)
672 {
673         struct driver_data *driver_data;
674         struct fw_packet *packet;
675         struct fw_ohci *ohci = context->ohci;
676         dma_addr_t payload_bus;
677         int evt;
678
679         if (last->transfer_status == 0)
680                 /* This descriptor isn't done yet, stop iteration. */
681                 return 0;
682
683         driver_data = (struct driver_data *) &d[3];
684         packet = driver_data->packet;
685         if (packet == NULL)
686                 /* This packet was cancelled, just continue. */
687                 return 1;
688
689         payload_bus = le32_to_cpu(last->data_address);
690         if (payload_bus != 0)
691                 dma_unmap_single(ohci->card.device, payload_bus,
692                                  packet->payload_length, DMA_TO_DEVICE);
693
694         evt = le16_to_cpu(last->transfer_status) & 0x1f;
695         packet->timestamp = le16_to_cpu(last->res_count);
696
697         switch (evt) {
698         case OHCI1394_evt_timeout:
699                 /* Async response transmit timed out. */
700                 packet->ack = RCODE_CANCELLED;
701                 break;
702
703         case OHCI1394_evt_flushed:
704                 /* The packet was flushed; this should give the same error
705                  * as when we try to use a stale generation count. */
706                 packet->ack = RCODE_GENERATION;
707                 break;
708
709         case OHCI1394_evt_missing_ack:
710                 /* Using a valid (current) generation count, but the
711                  * node is not on the bus or not sending acks. */
712                 packet->ack = RCODE_NO_ACK;
713                 break;
714
715         case ACK_COMPLETE + 0x10:
716         case ACK_PENDING + 0x10:
717         case ACK_BUSY_X + 0x10:
718         case ACK_BUSY_A + 0x10:
719         case ACK_BUSY_B + 0x10:
720         case ACK_DATA_ERROR + 0x10:
721         case ACK_TYPE_ERROR + 0x10:
722                 packet->ack = evt - 0x10;
723                 break;
724
725         default:
726                 packet->ack = RCODE_SEND_ERROR;
727                 break;
728         }
729
730         packet->callback(packet, &ohci->card, packet->ack);
731
732         return 1;
733 }
734
735 #define header_get_destination(q)       (((q) >> 16) & 0xffff)
736 #define header_get_tcode(q)             (((q) >> 4) & 0x0f)
737 #define header_get_offset_high(q)       (((q) >> 0) & 0xffff)
738 #define header_get_data_length(q)       (((q) >> 16) & 0xffff)
739 #define header_get_extended_tcode(q)    (((q) >> 0) & 0xffff)
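
/* handle_local_request() below combines two of these into the 48-bit
 * CSR offset, e.g.
 *
 *        offset = (u64) header_get_offset_high(header[1]) << 32 | header[2];
 *
 * while header_get_data_length() and header_get_extended_tcode() decode
 * the fourth quadlet of block and lock requests. */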
740
741 static void
742 handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
743 {
744         struct fw_packet response;
745         int tcode, length, i;
746
747         tcode = header_get_tcode(packet->header[0]);
748         if (TCODE_IS_BLOCK_PACKET(tcode))
749                 length = header_get_data_length(packet->header[3]);
750         else
751                 length = 4;
752
753         i = csr - CSR_CONFIG_ROM;
754         if (i + length > CONFIG_ROM_SIZE) {
755                 fw_fill_response(&response, packet->header,
756                                  RCODE_ADDRESS_ERROR, NULL, 0);
757         } else if (!TCODE_IS_READ_REQUEST(tcode)) {
758                 fw_fill_response(&response, packet->header,
759                                  RCODE_TYPE_ERROR, NULL, 0);
760         } else {
761                 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
762                                  (void *) ohci->config_rom + i, length);
763         }
764
765         fw_core_handle_response(&ohci->card, &response);
766 }
767
768 static void
769 handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
770 {
771         struct fw_packet response;
772         int tcode, length, ext_tcode, sel;
773         __be32 *payload, lock_old;
774         u32 lock_arg, lock_data;
775
776         tcode = header_get_tcode(packet->header[0]);
777         length = header_get_data_length(packet->header[3]);
778         payload = packet->payload;
779         ext_tcode = header_get_extended_tcode(packet->header[3]);
780
781         if (tcode == TCODE_LOCK_REQUEST &&
782             ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
783                 lock_arg = be32_to_cpu(payload[0]);
784                 lock_data = be32_to_cpu(payload[1]);
785         } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
786                 lock_arg = 0;
787                 lock_data = 0;
788         } else {
789                 fw_fill_response(&response, packet->header,
790                                  RCODE_TYPE_ERROR, NULL, 0);
791                 goto out;
792         }
793
794         sel = (csr - CSR_BUS_MANAGER_ID) / 4;
795         reg_write(ohci, OHCI1394_CSRData, lock_data);
796         reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
797         reg_write(ohci, OHCI1394_CSRControl, sel);
798
799         if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
800                 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
801         else
802                 fw_notify("swap not done yet\n");
803
804         fw_fill_response(&response, packet->header,
805                          RCODE_COMPLETE, &lock_old, sizeof lock_old);
806  out:
807         fw_core_handle_response(&ohci->card, &response);
808 }
809
810 static void
811 handle_local_request(struct context *ctx, struct fw_packet *packet)
812 {
813         u64 offset;
814         u32 csr;
815
816         packet->ack = ACK_PENDING;
817         packet->callback(packet, &ctx->ohci->card, packet->ack);
818
819         offset =
820                 ((unsigned long long)
821                  header_get_offset_high(packet->header[1]) << 32) |
822                 packet->header[2];
823         csr = offset - CSR_REGISTER_BASE;
824
825         /* Handle config rom reads. */
826         if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
827                 handle_local_rom(ctx->ohci, packet, csr);
828         else switch (csr) {
829         case CSR_BUS_MANAGER_ID:
830         case CSR_BANDWIDTH_AVAILABLE:
831         case CSR_CHANNELS_AVAILABLE_HI:
832         case CSR_CHANNELS_AVAILABLE_LO:
833                 handle_local_lock(ctx->ohci, packet, csr);
834                 break;
835         default:
836                 if (ctx == &ctx->ohci->at_request_ctx)
837                         fw_core_handle_request(&ctx->ohci->card, packet);
838                 else
839                         fw_core_handle_response(&ctx->ohci->card, packet);
840                 break;
841         }
842 }
843
844 static void
845 at_context_transmit(struct context *ctx, struct fw_packet *packet)
846 {
847         unsigned long flags;
848         int retval;
849
850         spin_lock_irqsave(&ctx->ohci->lock, flags);
851
852         if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
853             ctx->ohci->generation == packet->generation) {
854                 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
855                 handle_local_request(ctx, packet);
856                 return;
857         }
858
859         retval = at_context_queue_packet(ctx, packet);
860         spin_unlock_irqrestore(&ctx->ohci->lock, flags);
861
862         if (retval < 0)
863                 packet->callback(packet, &ctx->ohci->card, packet->ack);
864
865 }
866
867 static void bus_reset_tasklet(unsigned long data)
868 {
869         struct fw_ohci *ohci = (struct fw_ohci *)data;
870         int self_id_count, i, j, reg;
871         int generation, new_generation;
872         unsigned long flags;
873
874         reg = reg_read(ohci, OHCI1394_NodeID);
875         if (!(reg & OHCI1394_NodeID_idValid)) {
876                 fw_error("node ID not valid, new bus reset in progress\n");
877                 return;
878         }
879         ohci->node_id = reg & 0xffff;
880
881         /* The count in the SelfIDCount register is the number of
882          * bytes in the self ID receive buffer.  Since we also receive
883          * the inverted quadlets and a header quadlet, we shift one
884          * bit extra to get the actual number of self IDs. */
885
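        /* A worked example: three self IDs and their inverted check
         * quadlets plus the header quadlet occupy 28 bytes; shifting
         * the size field down by 3 yields 3, since each self ID pair
         * takes 8 bytes and the header quadlet's 4 bytes are lost in
         * the truncation. */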
886         self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
887         generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
888
889         for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
890                 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
891                         fw_error("inconsistent self IDs\n");
892                 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
893         }
894
895         /* Check the consistency of the self IDs we just read.  The
896          * problem we face is that a new bus reset can start while we
897          * read out the self IDs from the DMA buffer. If this happens,
898          * the DMA buffer will be overwritten with new self IDs and we
899          * will read out inconsistent data.  The OHCI specification
900          * (section 11.2) recommends a technique similar to
901          * linux/seqlock.h, where we remember the generation of the
902          * self IDs in the buffer before reading them out and compare
903          * it to the current generation after reading them out.  If
904          * the two generations match we know we have a consistent set
905          * of self IDs. */
906
907         new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
908         if (new_generation != generation) {
909                 fw_notify("recursive bus reset detected, "
910                           "discarding self ids\n");
911                 return;
912         }
913
914         /* FIXME: Document how the locking works. */
915         spin_lock_irqsave(&ohci->lock, flags);
916
917         ohci->generation = generation;
918         context_stop(&ohci->at_request_ctx);
919         context_stop(&ohci->at_response_ctx);
920         reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
921
922         /* This next bit is unrelated to the AT context stuff but we
923          * have to do it under the spinlock also.  If a new config rom
924          * was set up before this reset, the old one is now no longer
925          * in use and we can free it. Update the config rom pointers
926          * to point to the current config rom and clear the
927          * next_config_rom pointer so a new update can take place. */
928
929         if (ohci->next_config_rom != NULL) {
930                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
931                                   ohci->config_rom, ohci->config_rom_bus);
932                 ohci->config_rom      = ohci->next_config_rom;
933                 ohci->config_rom_bus  = ohci->next_config_rom_bus;
934                 ohci->next_config_rom = NULL;
935
936                 /* Restore config_rom image and manually update
937                  * config_rom registers.  Writing the header quadlet
938                  * will indicate that the config rom is ready, so we
939                  * do that last. */
940                 reg_write(ohci, OHCI1394_BusOptions,
941                           be32_to_cpu(ohci->config_rom[2]));
942                 ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
943                 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
944         }
945
946         spin_unlock_irqrestore(&ohci->lock, flags);
947
948         fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
949                                  self_id_count, ohci->self_id_buffer);
950 }
951
952 static irqreturn_t irq_handler(int irq, void *data)
953 {
954         struct fw_ohci *ohci = data;
955         u32 event, iso_event;
956         int i;
957
958         event = reg_read(ohci, OHCI1394_IntEventClear);
959
960         if (!event)
961                 return IRQ_NONE;
962
963         reg_write(ohci, OHCI1394_IntEventClear, event);
964
965         if (event & OHCI1394_selfIDComplete)
966                 tasklet_schedule(&ohci->bus_reset_tasklet);
967
968         if (event & OHCI1394_RQPkt)
969                 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
970
971         if (event & OHCI1394_RSPkt)
972                 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
973
974         if (event & OHCI1394_reqTxComplete)
975                 tasklet_schedule(&ohci->at_request_ctx.tasklet);
976
977         if (event & OHCI1394_respTxComplete)
978                 tasklet_schedule(&ohci->at_response_ctx.tasklet);
979
980         iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
981         reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
982
983         while (iso_event) {
984                 i = ffs(iso_event) - 1;
985                 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
986                 iso_event &= ~(1 << i);
987         }
988
989         iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
990         reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
991
992         while (iso_event) {
993                 i = ffs(iso_event) - 1;
994                 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
995                 iso_event &= ~(1 << i);
996         }
997
998         return IRQ_HANDLED;
999 }
1000
1001 static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1002 {
1003         struct fw_ohci *ohci = fw_ohci(card);
1004         struct pci_dev *dev = to_pci_dev(card->device);
1005
1006         /* When the link is not yet enabled, the atomic config rom
1007          * update mechanism described below in ohci_set_config_rom()
1008          * is not active.  We have to update ConfigRomHeader and
1009          * BusOptions manually, and the write to ConfigROMmap takes
1010          * effect immediately.  We tie this to the enabling of the
1011          * link, so we have a valid config rom before enabling - the
1012          * OHCI requires that ConfigROMhdr and BusOptions have valid
1013          * values before enabling.
1014          *
1015          * However, when the ConfigROMmap is written, some controllers
1016          * always read back quadlets 0 and 2 from the config rom to
1017          * the ConfigRomHeader and BusOptions registers on bus reset.
1018          * They shouldn't do that in this initial case where the link
1019          * isn't enabled.  This means we have to use the same
1020          * workaround here, setting the bus header to 0 and then writing
1021          * the right values in the bus reset tasklet.
1022          */
1023
1024         ohci->next_config_rom =
1025                 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1026                                    &ohci->next_config_rom_bus, GFP_KERNEL);
1027         if (ohci->next_config_rom == NULL)
1028                 return -ENOMEM;
1029
1030         memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1031         fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
1032
1033         ohci->next_header = config_rom[0];
1034         ohci->next_config_rom[0] = 0;
1035         reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1036         reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
1037         reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1038
1039         reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1040
1041         if (request_irq(dev->irq, irq_handler,
1042                         IRQF_SHARED, ohci_driver_name, ohci)) {
1043                 fw_error("Failed to allocate shared interrupt %d.\n",
1044                          dev->irq);
1045                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1046                                   ohci->next_config_rom, ohci->next_config_rom_bus);
1047                 return -EIO;
1048         }
1049
1050         reg_write(ohci, OHCI1394_HCControlSet,
1051                   OHCI1394_HCControl_linkEnable |
1052                   OHCI1394_HCControl_BIBimageValid);
1053         flush_writes(ohci);
1054
1055         /* We are ready to go, initiate bus reset to finish the
1056          * initialization. */
1057
1058         fw_core_initiate_bus_reset(&ohci->card, 1);
1059
1060         return 0;
1061 }
1062
1063 static int
1064 ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1065 {
1066         struct fw_ohci *ohci;
1067         unsigned long flags;
1068         int retval = 0;
1069         __be32 *next_config_rom;
1070         dma_addr_t next_config_rom_bus;
1071
1072         ohci = fw_ohci(card);
1073
1074         /* When the OHCI controller is enabled, the config rom update
1075          * mechanism is a bit tricky, but easy enough to use.  See
1076          * section 5.5.6 in the OHCI specification.
1077          *
1078          * The OHCI controller caches the new config rom address in a
1079          * shadow register (ConfigROMmapNext) and needs a bus reset
1080          * for the changes to take place.  When the bus reset is
1081          * detected, the controller loads the new values for the
1082          * ConfigRomHeader and BusOptions registers from the specified
1083          * config rom and loads ConfigROMmap from the ConfigROMmapNext
1084          * shadow register. All automatically and atomically.
1085          *
1086          * Now, there's a twist to this story.  The automatic load of
1087          * ConfigRomHeader and BusOptions doesn't honor the
1088          * noByteSwapData bit, so with a be32 config rom, the
1089          * controller will load be32 values in to these registers
1090          * during the atomic update, even on little endian
1091          * architectures.  The workaround we use is to put a 0 in the
1092          * header quadlet; 0 is endian agnostic and means that the
1093          * config rom isn't ready yet.  In the bus reset tasklet we
1094          * then set up the real values for the two registers.
1095          *
1096          * We use ohci->lock to avoid racing with the code that sets
1097          * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1098          */
1099
1100         next_config_rom =
1101                 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1102                                    &next_config_rom_bus, GFP_KERNEL);
1103         if (next_config_rom == NULL)
1104                 return -ENOMEM;
1105
1106         spin_lock_irqsave(&ohci->lock, flags);
1107
1108         if (ohci->next_config_rom == NULL) {
1109                 ohci->next_config_rom = next_config_rom;
1110                 ohci->next_config_rom_bus = next_config_rom_bus;
1111
1112                 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1113                 fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
1114                                   length * 4);
1115
1116                 ohci->next_header = config_rom[0];
1117                 ohci->next_config_rom[0] = 0;
1118
1119                 reg_write(ohci, OHCI1394_ConfigROMmap,
1120                           ohci->next_config_rom_bus);
1121         } else {
1122                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1123                                   next_config_rom, next_config_rom_bus);
1124                 retval = -EBUSY;
1125         }
1126
1127         spin_unlock_irqrestore(&ohci->lock, flags);
1128
1129         /* Now initiate a bus reset to have the changes take
1130          * effect. We clean up the old config rom memory and DMA
1131          * mappings in the bus reset tasklet, since the OHCI
1132          * controller could need to access it before the bus reset
1133          * takes effect. */
1134         if (retval == 0)
1135                 fw_core_initiate_bus_reset(&ohci->card, 1);
1136
1137         return retval;
1138 }
1139
1140 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1141 {
1142         struct fw_ohci *ohci = fw_ohci(card);
1143
1144         at_context_transmit(&ohci->at_request_ctx, packet);
1145 }
1146
1147 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1148 {
1149         struct fw_ohci *ohci = fw_ohci(card);
1150
1151         at_context_transmit(&ohci->at_response_ctx, packet);
1152 }
1153
1154 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1155 {
1156         struct fw_ohci *ohci = fw_ohci(card);
1157         struct context *ctx = &ohci->at_request_ctx;
1158         struct driver_data *driver_data = packet->driver_data;
1159         int retval = -ENOENT;
1160
1161         tasklet_disable(&ctx->tasklet);
1162
1163         if (packet->ack != 0)
1164                 goto out;
1165
1166         driver_data->packet = NULL;
1167         packet->ack = RCODE_CANCELLED;
1168         packet->callback(packet, &ohci->card, packet->ack);
1169         retval = 0;
1170
1171  out:
1172         tasklet_enable(&ctx->tasklet);
1173
1174         return retval;
1175 }
1176
1177 static int
1178 ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1179 {
1180         struct fw_ohci *ohci = fw_ohci(card);
1181         unsigned long flags;
1182         int n, retval = 0;
1183
1184         /* FIXME:  Make sure this bitmask is cleared when we clear the busReset
1185          * interrupt bit.  Clear physReqResourceAllBuses on bus reset. */
1186
1187         spin_lock_irqsave(&ohci->lock, flags);
1188
1189         if (ohci->generation != generation) {
1190                 retval = -ESTALE;
1191                 goto out;
1192         }
1193
1194         /* NOTE, if the node ID contains a non-local bus ID, physical DMA is
1195          * enabled for _all_ nodes on remote buses. */
1196
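        /* For example, node_id 0xffc2 (local bus 0x3ff, physical ID 2)
         * selects filter bit 2 in PhyReqFilterLo, while any node ID on
         * a remote bus selects bit 63, which enables physical DMA for
         * all remote-bus nodes as noted above. */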
1197         n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1198         if (n < 32)
1199                 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1200         else
1201                 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1202
1203         flush_writes(ohci);
1204  out:
1205         spin_unlock_irqrestore(&ohci->lock, flags);
1206         return retval;
1207 }
1208
1209 static int handle_ir_bufferfill_packet(struct context *context,
1210                                        struct descriptor *d,
1211                                        struct descriptor *last)
1212 {
1213         struct iso_context *ctx =
1214                 container_of(context, struct iso_context, context);
1215
1216         if (d->res_count > 0)
1217                 return 0;
1218
1219         if (le16_to_cpu(last->control) & descriptor_irq_always)
1220                 ctx->base.callback(&ctx->base,
1221                                    le16_to_cpu(last->res_count),
1222                                    0, NULL, ctx->base.callback_data);
1223
1224         return 1;
1225 }
1226
1227 static int handle_ir_dualbuffer_packet(struct context *context,
1228                                        struct descriptor *d,
1229                                        struct descriptor *last)
1230 {
1231         struct iso_context *ctx =
1232                 container_of(context, struct iso_context, context);
1233         struct db_descriptor *db = (struct db_descriptor *) d;
1234         size_t header_length;
1235
1236         if (db->first_res_count > 0 && db->second_res_count > 0)
1237                 /* This descriptor isn't done yet, stop iteration. */
1238                 return 0;
1239
1240         header_length = le16_to_cpu(db->first_req_count) - le16_to_cpu(db->first_res_count);
1241         if (ctx->header_length + header_length <= PAGE_SIZE)
1242                 memcpy(ctx->header + ctx->header_length, db + 1, header_length);
1243         ctx->header_length += header_length;
1244
1245         if (le16_to_cpu(db->control) & descriptor_irq_always) {
1246                 ctx->base.callback(&ctx->base, 0,
1247                                    ctx->header_length, ctx->header,
1248                                    ctx->base.callback_data);
1249                 ctx->header_length = 0;
1250         }
1251
1252         return 1;
1253 }
1254
1255 static int handle_it_packet(struct context *context,
1256                             struct descriptor *d,
1257                             struct descriptor *last)
1258 {
1259         struct iso_context *ctx =
1260                 container_of(context, struct iso_context, context);
1261
1262         if (last->transfer_status == 0)
1263                 /* This descriptor isn't done yet, stop iteration. */
1264                 return 0;
1265
1266         if (le16_to_cpu(last->control) & descriptor_irq_always)
1267                 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1268                                    0, NULL, ctx->base.callback_data);
1269
1270         return 1;
1271 }
1272
1273 static struct fw_iso_context *
1274 ohci_allocate_iso_context(struct fw_card *card, int type,
1275                           int sync, int tags, size_t header_size)
1276 {
1277         struct fw_ohci *ohci = fw_ohci(card);
1278         struct iso_context *ctx, *list;
1279         descriptor_callback_t callback;
1280         u32 *mask, regs;
1281         unsigned long flags;
1282         int index, retval = -ENOMEM;
1283
1284         if (type == FW_ISO_CONTEXT_TRANSMIT) {
1285                 mask = &ohci->it_context_mask;
1286                 list = ohci->it_context_list;
1287                 callback = handle_it_packet;
1288         } else {
1289                 mask = &ohci->ir_context_mask;
1290                 list = ohci->ir_context_list;
1291                 if (header_size > 0)
1292                         callback = handle_ir_dualbuffer_packet;
1293                 else
1294                         callback = handle_ir_bufferfill_packet;
1295         }
1296
1297         if (callback == handle_ir_dualbuffer_packet &&
1298             ohci->version < OHCI_VERSION_1_1)
1299                 return ERR_PTR(-EINVAL);
1300
1301         spin_lock_irqsave(&ohci->lock, flags);
1302         index = ffs(*mask) - 1;
1303         if (index >= 0)
1304                 *mask &= ~(1 << index);
1305         spin_unlock_irqrestore(&ohci->lock, flags);
1306
1307         if (index < 0)
1308                 return ERR_PTR(-EBUSY);
1309
1310         if (type == FW_ISO_CONTEXT_TRANSMIT)
1311                 regs = OHCI1394_IsoXmitContextBase(index);
1312         else
1313                 regs = OHCI1394_IsoRcvContextBase(index);
1314
1315         ctx = &list[index];
1316         memset(ctx, 0, sizeof *ctx);
1317         ctx->header_length = 0;
1318         ctx->header = (void *) __get_free_page(GFP_KERNEL);
1319         if (ctx->header == NULL)
1320                 goto out;
1321
1322         retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
1323                               regs, callback);
1324         if (retval < 0)
1325                 goto out_with_header;
1326
1327         return &ctx->base;
1328
1329  out_with_header:
1330         free_page((unsigned long)ctx->header);
1331  out:
1332         spin_lock_irqsave(&ohci->lock, flags);
1333         *mask |= 1 << index;
1334         spin_unlock_irqrestore(&ohci->lock, flags);
1335
1336         return ERR_PTR(retval);
1337 }
1338
1339 static int ohci_start_iso(struct fw_iso_context *base, s32 cycle)
1340 {
1341         struct iso_context *ctx = container_of(base, struct iso_context, base);
1342         struct fw_ohci *ohci = ctx->context.ohci;
1343         u32 cycle_match = 0, mode;
1344         int index;
1345
1346         if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1347                 index = ctx - ohci->it_context_list;
1348                 if (cycle > 0)
1349                         cycle_match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
1350                                 (cycle & 0x7fff) << 16;
1351
1352                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
1353                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
1354                 context_run(&ctx->context, cycle_match);
1355         } else {
1356                 index = ctx - ohci->ir_context_list;
1357
1358                 if (ctx->base.header_size > 0)
1359                         mode = IR_CONTEXT_DUAL_BUFFER_MODE;
1360                 else
1361                         mode = IR_CONTEXT_BUFFER_FILL;
1362                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
1363                 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
1364                 reg_write(ohci, context_match(ctx->context.regs),
1365                           (ctx->base.tags << 28) |
1366                           (ctx->base.sync << 8) | ctx->base.channel);
1367                 context_run(&ctx->context, mode);
1368         }
1369
1370         return 0;
1371 }
1372
1373 static int ohci_stop_iso(struct fw_iso_context *base)
1374 {
1375         struct fw_ohci *ohci = fw_ohci(base->card);
1376         struct iso_context *ctx = container_of(base, struct iso_context, base);
1377         int index;
1378
1379         if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1380                 index = ctx - ohci->it_context_list;
1381                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
1382         } else {
1383                 index = ctx - ohci->ir_context_list;
1384                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
1385         }
1386         flush_writes(ohci);
1387         context_stop(&ctx->context);
1388
1389         return 0;
1390 }
1391
1392 static void ohci_free_iso_context(struct fw_iso_context *base)
1393 {
1394         struct fw_ohci *ohci = fw_ohci(base->card);
1395         struct iso_context *ctx = container_of(base, struct iso_context, base);
1396         unsigned long flags;
1397         int index;
1398
1399         ohci_stop_iso(base);
1400         context_release(&ctx->context);
1401         free_page((unsigned long)ctx->header);
1402
1403         spin_lock_irqsave(&ohci->lock, flags);
1404
1405         if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1406                 index = ctx - ohci->it_context_list;
1407                 ohci->it_context_mask |= 1 << index;
1408         } else {
1409                 index = ctx - ohci->ir_context_list;
1410                 ohci->ir_context_mask |= 1 << index;
1411         }
1412
1413         spin_unlock_irqrestore(&ohci->lock, flags);
1414 }
1415
1416 static int
1417 ohci_queue_iso_transmit(struct fw_iso_context *base,
1418                         struct fw_iso_packet *packet,
1419                         struct fw_iso_buffer *buffer,
1420                         unsigned long payload)
1421 {
1422         struct iso_context *ctx = container_of(base, struct iso_context, base);
1423         struct descriptor *d, *last, *pd;
1424         struct fw_iso_packet *p;
1425         __le32 *header;
1426         dma_addr_t d_bus, page_bus;
1427         u32 z, header_z, payload_z, irq;
1428         u32 payload_index, payload_end_index, next_page_index;
1429         int page, end_page, i, length, offset;
1430
1431         /* FIXME: Cycle lost behavior should be configurable: lose
1432          * packet, retransmit or terminate. */
1433
1434         p = packet;
1435         payload_index = payload;
1436
        if (p->skip)
                z = 1;
        else
                z = 2;
        if (p->header_length > 0)
                z++;

        /* Determine the index of the first page past the end of the payload. */
        end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
        if (p->payload_length > 0)
                payload_z = end_page - (payload_index >> PAGE_SHIFT);
        else
                payload_z = 0;

        z += payload_z;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof *d);

        d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        if (!p->skip) {
                d[0].control   = cpu_to_le16(descriptor_key_immediate);
                d[0].req_count = cpu_to_le16(8);

                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(it_header_sy(p->sy) |
                                        it_header_tag(p->tag) |
                                        it_header_tcode(TCODE_STREAM_DATA) |
                                        it_header_channel(ctx->base.channel) |
                                        it_header_speed(ctx->base.speed));
                header[1] =
                        cpu_to_le32(it_header_data_length(p->header_length +
                                                          p->payload_length));
        }

        if (p->header_length > 0) {
                d[2].req_count    = cpu_to_le16(p->header_length);
                d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d);
                memcpy(&d[z], p->header, p->header_length);
        }

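        /* The payload may straddle page boundaries in the DMA-mapped
         * buffer, so emit one descriptor per page, clamping each chunk
         * to the end of its page. */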
        pd = d + z - payload_z;
        payload_end_index = payload_index + p->payload_length;
        for (i = 0; i < payload_z; i++) {
                page               = payload_index >> PAGE_SHIFT;
                offset             = payload_index & ~PAGE_MASK;
                next_page_index    = (page + 1) << PAGE_SHIFT;
                length             =
                        min(next_page_index, payload_end_index) - payload_index;
                pd[i].req_count    = cpu_to_le16(length);

                page_bus = page_private(buffer->pages[page]);
                pd[i].data_address = cpu_to_le32(page_bus + offset);

                payload_index += length;
        }

        if (p->interrupt)
                irq = descriptor_irq_always;
        else
                irq = descriptor_no_irq;

        last = z == 2 ? d : d + z - 1;
        last->control |= cpu_to_le16(descriptor_output_last |
                                     descriptor_status |
                                     descriptor_branch_always |
                                     irq);

        context_append(&ctx->context, d, z, header_z);

        return 0;
}

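/* Queue a bare input_more descriptor with its wait field set; the
 * receive context stalls on it, which is how packet->skip is
 * implemented for receive contexts. */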
static int
setup_wait_descriptor(struct context *ctx)
{
        struct descriptor *d;
        dma_addr_t d_bus;

        d = context_get_descriptors(ctx, 1, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        d->control = cpu_to_le16(descriptor_input_more |
                                 descriptor_status |
                                 descriptor_branch_always |
                                 descriptor_wait);

        context_append(ctx, d, 1, 0);

        return 0;
}

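/* Queue a receive packet in dual-buffer mode: each db_descriptor
 * directs the first bytes of the packet (the headers) into memory
 * that follows the descriptor and the remainder into the caller's
 * payload buffer.  Dual-buffer receive requires an OHCI 1.1
 * controller. */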
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
                                  struct fw_iso_packet *packet,
                                  struct fw_iso_buffer *buffer,
                                  unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct db_descriptor *db = NULL;
        struct descriptor *d;
        struct fw_iso_packet *p;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, length, rest;
        int page, offset;

        /* FIXME: Cycle lost behavior should be configurable: lose the
         * packet, retransmit, or terminate. */

        if (packet->skip && setup_wait_descriptor(&ctx->context) < 0)
                return -ENOMEM;

        p = packet;
        z = 2;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof *d);
        page     = payload >> PAGE_SHIFT;
        offset   = payload & ~PAGE_MASK;
        rest     = p->payload_length;

        /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
        /* FIXME: handle descriptor_wait */
        /* FIXME: make packet-per-buffer/dual-buffer a context option */
        while (rest > 0) {
                d = context_get_descriptors(&ctx->context,
                                            z + header_z, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                db = (struct db_descriptor *) d;
                db->control = cpu_to_le16(descriptor_status |
                                          descriptor_branch_always);
                db->first_size = cpu_to_le16(ctx->base.header_size);
                db->first_req_count = cpu_to_le16(p->header_length);
                db->first_res_count = db->first_req_count;
                db->first_buffer = cpu_to_le32(d_bus + sizeof *db);

                if (offset + rest < PAGE_SIZE)
                        length = rest;
                else
                        length = PAGE_SIZE - offset;

                db->second_req_count = cpu_to_le16(length);
                db->second_res_count = db->second_req_count;
                page_bus = page_private(buffer->pages[page]);
                db->second_buffer = cpu_to_le32(page_bus + offset);

                if (p->interrupt && length == rest)
                        db->control |= cpu_to_le16(descriptor_irq_always);

                context_append(&ctx->context, d, z, header_z);
                offset = (offset + length) & ~PAGE_MASK;
                rest -= length;
                page++;
        }

        return 0;
}

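/* Queue a receive buffer in buffer-fill mode: incoming packets are
 * packed back to back into the caller's buffer with no per-packet
 * headers, using one input_more descriptor per page. */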
static int
ohci_queue_iso_receive_bufferfill(struct fw_iso_context *base,
                                  struct fw_iso_packet *packet,
                                  struct fw_iso_buffer *buffer,
                                  unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct descriptor *d = NULL;
        dma_addr_t d_bus, page_bus;
        u32 length, rest;
        int page, offset;

        page   = payload >> PAGE_SHIFT;
        offset = payload & ~PAGE_MASK;
        rest   = packet->payload_length;

        if (packet->skip && setup_wait_descriptor(&ctx->context) < 0)
                return -ENOMEM;

        while (rest > 0) {
                d = context_get_descriptors(&ctx->context, 1, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                d->control = cpu_to_le16(descriptor_input_more |
                                         descriptor_status |
                                         descriptor_branch_always);

                if (offset + rest < PAGE_SIZE)
                        length = rest;
                else
                        length = PAGE_SIZE - offset;

                page_bus = page_private(buffer->pages[page]);
                d->data_address = cpu_to_le32(page_bus + offset);
                d->req_count = cpu_to_le16(length);
                d->res_count = cpu_to_le16(length);

                if (packet->interrupt && length == rest)
                        d->control |= cpu_to_le16(descriptor_irq_always);

                context_append(&ctx->context, d, 1, 0);

                offset = (offset + length) & ~PAGE_MASK;
                rest -= length;
                page++;
        }

        return 0;
}

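/* Dispatch to the right queueing routine: IT contexts take the
 * transmit path; IR contexts use buffer-fill mode when no per-packet
 * headers were requested and dual-buffer mode otherwise. */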
static int
ohci_queue_iso(struct fw_iso_context *base,
               struct fw_iso_packet *packet,
               struct fw_iso_buffer *buffer,
               unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);

        if (base->type == FW_ISO_CONTEXT_TRANSMIT)
                return ohci_queue_iso_transmit(base, packet, buffer, payload);
        else if (base->header_size == 0)
                return ohci_queue_iso_receive_bufferfill(base, packet,
                                                         buffer, payload);
        else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
                return ohci_queue_iso_receive_dualbuffer(base, packet,
                                                         buffer, payload);
        else
                /* FIXME: Implement fallback for OHCI 1.0 controllers. */
                return -EINVAL;
}

static const struct fw_card_driver ohci_driver = {
        .name                   = ohci_driver_name,
        .enable                 = ohci_enable,
        .update_phy_reg         = ohci_update_phy_reg,
        .set_config_rom         = ohci_set_config_rom,
        .send_request           = ohci_send_request,
        .send_response          = ohci_send_response,
        .cancel_packet          = ohci_cancel_packet,
        .enable_phys_dma        = ohci_enable_phys_dma,

        .allocate_iso_context   = ohci_allocate_iso_context,
        .free_iso_context       = ohci_free_iso_context,
        .queue_iso              = ohci_queue_iso,
        .start_iso              = ohci_start_iso,
        .stop_iso               = ohci_stop_iso,
};

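/* Issue a soft reset and poll until the controller clears the bit
 * again, giving up after OHCI_LOOP_COUNT milliseconds. */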
static int software_reset(struct fw_ohci *ohci)
{
        int i;

        reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

        for (i = 0; i < OHCI_LOOP_COUNT; i++) {
                if ((reg_read(ohci, OHCI1394_HCControlSet) &
                     OHCI1394_HCControl_softReset) == 0)
                        return 0;
                msleep(1);
        }

        return -EBUSY;
}

/* ---------- pci subsystem interface ---------- */

enum {
        CLEANUP_SELF_ID,
        CLEANUP_REGISTERS,
        CLEANUP_IOMEM,
        CLEANUP_DISABLE,
        CLEANUP_PUT_CARD,
};

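/* Unwind pci_probe() from the given stage; each case intentionally
 * falls through to the ones below it.  The error code is returned
 * unchanged so call sites can write "return cleanup(...);". */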
static int cleanup(struct fw_ohci *ohci, int stage, int code)
{
        struct pci_dev *dev = to_pci_dev(ohci->card.device);

        switch (stage) {
        case CLEANUP_SELF_ID:
                dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
                                  ohci->self_id_cpu, ohci->self_id_bus);
        case CLEANUP_REGISTERS:
                kfree(ohci->it_context_list);
                kfree(ohci->ir_context_list);
                pci_iounmap(dev, ohci->registers);
        case CLEANUP_IOMEM:
                pci_release_region(dev, 0);
        case CLEANUP_DISABLE:
                pci_disable_device(dev);
        case CLEANUP_PUT_CARD:
                fw_card_put(&ohci->card);
        }

        return code;
}

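/* Probe one OHCI controller: map its registers, reset it, bring up
 * the link, initialize the AR/AT and isochronous contexts, allocate
 * the self-ID buffer, unmask interrupts, and register the card with
 * the core. */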
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
        struct fw_ohci *ohci;
        u32 bus_options, max_receive, link_speed;
        u64 guid;
        int error_code;
        size_t size;

        ohci = kzalloc(sizeof *ohci, GFP_KERNEL);
        if (ohci == NULL) {
                fw_error("Could not malloc fw_ohci data.\n");
                return -ENOMEM;
        }

        fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

        if (pci_enable_device(dev)) {
                fw_error("Failed to enable OHCI hardware.\n");
                return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV);
        }

        pci_set_master(dev);
        pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
        pci_set_drvdata(dev, ohci);

        spin_lock_init(&ohci->lock);

        tasklet_init(&ohci->bus_reset_tasklet,
                     bus_reset_tasklet, (unsigned long)ohci);

        if (pci_request_region(dev, 0, ohci_driver_name)) {
                fw_error("MMIO resource unavailable\n");
                return cleanup(ohci, CLEANUP_DISABLE, -EBUSY);
        }

        ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
        if (ohci->registers == NULL) {
                fw_error("Failed to remap registers\n");
                return cleanup(ohci, CLEANUP_IOMEM, -ENXIO);
        }

        if (software_reset(ohci)) {
                fw_error("Failed to reset ohci card.\n");
                return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY);
        }

        /* Now enable LPS, which we need in order to start accessing
         * most of the registers.  In fact, on some cards (ALI M5251),
         * accessing registers in the SClk domain without LPS enabled
         * will lock up the machine.  Wait 50msec to make sure we have
         * full link enabled.  */
        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_LPS |
                  OHCI1394_HCControl_postedWriteEnable);
        flush_writes(ohci);
        msleep(50);

        reg_write(ohci, OHCI1394_HCControlClear,
                  OHCI1394_HCControl_noByteSwapData);

        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_rcvSelfID |
                  OHCI1394_LinkControl_cycleTimerEnable |
                  OHCI1394_LinkControl_cycleMaster);

        ar_context_init(&ohci->ar_request_ctx, ohci,
                        OHCI1394_AsReqRcvContextControlSet);

        ar_context_init(&ohci->ar_response_ctx, ohci,
                        OHCI1394_AsRspRcvContextControlSet);

        context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

        context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
                     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

        reg_write(ohci, OHCI1394_ATRetries,
                  OHCI1394_MAX_AT_REQ_RETRIES |
                  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
                  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

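        /* Probe which isochronous contexts the controller implements:
         * setting all bits in an IsoXmit/IsoRecv IntMask register and
         * reading it back leaves only the bits that correspond to
         * contexts that actually exist. */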
        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
        ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
        ohci->it_context_list = kzalloc(size, GFP_KERNEL);

        reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
        ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
        size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
        ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

        if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
                fw_error("Out of memory for it/ir contexts.\n");
                return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
        }

        /* self-id dma buffer allocation */
        ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
                                               SELF_ID_BUF_SIZE,
                                               &ohci->self_id_bus,
                                               GFP_KERNEL);
        if (ohci->self_id_cpu == NULL) {
                fw_error("Out of memory for self ID buffer.\n");
                return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
        }

        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
        reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
        reg_write(ohci, OHCI1394_IntEventClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskSet,
                  OHCI1394_selfIDComplete |
                  OHCI1394_RQPkt | OHCI1394_RSPkt |
                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                  OHCI1394_isochRx | OHCI1394_isochTx |
                  OHCI1394_masterIntEnable);

        bus_options = reg_read(ohci, OHCI1394_BusOptions);
        max_receive = (bus_options >> 12) & 0xf;
        link_speed = bus_options & 0x7;
        guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
                reg_read(ohci, OHCI1394_GUIDLo);

        error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid);
        if (error_code < 0)
                return cleanup(ohci, CLEANUP_SELF_ID, error_code);

        ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
                  dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);

        return 0;
}

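/* Undo pci_probe(): mask interrupts, detach the card from the core,
 * reset the controller, and release all resources via cleanup(). */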
static void pci_remove(struct pci_dev *dev)
{
        struct fw_ohci *ohci;

        ohci = pci_get_drvdata(dev);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        flush_writes(ohci);
        fw_core_remove_card(&ohci->card);

        /* FIXME: Fail all pending packets here, now that the upper
         * layers can't queue any more. */

        software_reset(ohci);
        free_irq(dev->irq, ohci);
        cleanup(ohci, CLEANUP_SELF_ID, 0);

        fw_notify("Removed fw-ohci device.\n");
}

static struct pci_device_id pci_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
        { }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
        .name           = ohci_driver_name,
        .id_table       = pci_table,
        .probe          = pci_probe,
        .remove         = pci_remove,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

static int __init fw_ohci_init(void)
{
        return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
        pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);