/* Copyright (c) 2006 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "aoe.h"

#define TIMERTICK (HZ / 10)
#define MINTIMER (2 * TIMERTICK)
#define MAXTIMER (HZ << 1)
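
/* With the arithmetic above, one timer tick is HZ/10 jiffies (100 ms),
 * the retransmit timeout floor is two ticks (200 ms), and the ceiling
 * is two seconds (HZ << 1), independent of the configured HZ.
 */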

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

/* find the outstanding frame with the given tag, or NULL if none matches */
static struct frame *
getframe(struct aoedev *d, int tag)
{
	struct frame *f, *e;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}
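
/* Tag layout, as a worked example: bit 31 stays zero (userland tagspace),
 * bits 30..16 carry the low 15 bits of d->lasttag, and bits 15..0 record
 * the jiffies tick at transmit time.  E.g. lasttag 0x0005 at tick 0x1234
 * yields tag 0x00051234; tsince() later recovers the frame's age from
 * the low 16 bits.
 */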

static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
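
/* Every multi-byte field in the AoE header goes out big-endian; the
 * shelf address (major) is 16 bits, the slot (minor) a single byte.
 */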

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	/* split the LBA into the six byte-wide fields of the ATA header */
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;	/* 512-byte sectors */
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}
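
	/* For writes the data is never copied: the skb's first page
	 * fragment points straight at the bio's buffer, so the NIC
	 * transmits from the caller's pages.  That is also why a frame
	 * is only reused once the network layer has dropped its skb
	 * reference (see freeframe below).
	 */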

	ah->cmdstat = WIN_READ | writebit | extbit;
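
	/* The command byte above: WIN_READ (0x20) plus writebit (0x10)
	 * gives WIN_WRITE (0x30), and extbit (0x04) selects the LBA48
	 * _EXT variants, so one expression covers all four commands.
	 */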

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		/* current bio_vec exhausted: advance to the next one */
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
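
/* A clone goes on the device send queue while the frame keeps the
 * original skb, so the same buffer can be retransmitted later without
 * rebuilding the headers.
 */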

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for_each_netdev(ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = aoe_hdr(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);	/* broadcast */
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}
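
/* The config queries are chained through skb->next and handed back
 * head-first; *tail lets the caller splice the whole list onto a device
 * send queue in one step.
 */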

/* find a frame we can reuse: its tag must be free and the network layer
 * must have released its hold on the skb (dataref == 1)
 */
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;
	return NULL;
}
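
/* DEVFL_KICKME records that work stalled only because the network layer
 * still holds every free frame's skb; rexmit_timer notices the flag and
 * calls aoecmd_work again on the next tick.
 */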

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
						d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}

static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	/* cap the retransmit at DEFAULTBCNT worth of sectors */
	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1) &&
		    d->maxbcnt != DEFAULTBCNT) {
			printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
				d->aoemajor, d->aoeminor, d->ifp->name);
			d->maxbcnt = DEFAULTBCNT;
			d->flags |= DEVFL_MAXBCNT;
		}
	}
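
	/* The block above is the jumbo-frame fallback: every capped
	 * retransmit of a frame larger than DEFAULTBCNT counts as a lost
	 * jumbo, and after twice the frame count of losses the device
	 * reverts to 1KB payloads for good (DEVFL_MAXBCNT blocks any
	 * later renegotiation).
	 */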

	skb->dev = d->ifp;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}

/* how many ticks have elapsed since this tag was transmitted? */
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f < e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;	/* ticks to seconds */
			if (n > aoe_deadsecs) {
				/* waited too long for a response; fail the
				 * device and stop retransmitting
				 */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}

	if (d->flags & DEVFL_KICKME) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		/* retransmits are pending: double rttavg as backoff */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
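
/* The timer re-arms itself every TIMERTICK, so a lost frame is noticed
 * within one tick of its timeout expiring; queued skbs are handed to
 * the network layer only after the device lock has been dropped.
 */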

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
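
		/* i_size is in bytes while capacity counts 512-byte
		 * sectors, hence the shift by 9 above; the reference
		 * from bdget_disk is dropped as soon as the size has
		 * been updated.
		 */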
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}

static void
ataid_complete(struct aoedev *d, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));

	/* word 86: command set/feature enabled */
	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
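
		/* The CHS values here are synthetic: 255 heads and 63
		 * sectors per track is the conventional fake geometry,
		 * and the cylinder count is derived so the product
		 * approximates the real capacity.
		 */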
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
	}

	if (d->ssize != ssize)
		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
			(unsigned long long)mac_addr(d->addr),
			d->aoemajor, d->aoeminor,
			d->fw_ver, (unsigned long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->gd != NULL) {
		d->gd->capacity = ssize;
		d->flags |= DEVFL_NEWSIZE;
	} else {
		if (d->flags & DEVFL_GDALLOC) {
			printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
				d->aoemajor, d->aoeminor,
				"it's already on!  This shouldn't happen.");
			return;
		}
		d->flags |= DEVFL_GDALLOC;
	}
	schedule_work(&d->work);
}
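
/* gendisk allocation and bdev size updates can sleep, so they are pushed
 * to aoecmd_sleepwork above rather than done here, where responses are
 * handled in atomic context.
 */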

static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		/* a negative rtt marks a response that arrived after its
		 * frame was retired; use its clamped age to adapt the
		 * minimum timer only
		 */
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
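
/* The update above is the standard exponentially weighted moving average
 * with gain 1/4: rttavg += (sample - rttavg) / 4.  E.g. an average of 40
 * ticks and an 80-tick sample move the average to 50.
 */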

void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = aoe_hdr(skb);
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		/* late response: the frame was already retired or reused;
		 * feed the (negated) age to the timer logic and log it
		 */
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d    tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = aoe_hdr(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;
	if (ahin->cmdstat & 0xa9) {	/* BSY, DF, DRQ or ERR: all clear on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read.  skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
			/* fall through: reads share the multi-frame
			 * continuation logic below with writes
			 */
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (unsigned char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}
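
	/* The bio completes only when every frame it generated has been
	 * answered (nframesout == 0) and no bytes remain (resid == 0);
	 * partial failures surface as a single -EIO at the end.
	 */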

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(sl);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff *sl;

	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);

	aoenet_xmit(sl);
}

/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;	/* obsolete device bits */

	skb->dev = d->ifp;

	/* reset the RTT average; it adapts as responses arrive */
	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = aoe_hdr(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}
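
	/* Payload negotiation in numbers, for illustration: with a
	 * 9000-byte MTU and 36 bytes of AoE + ATA headers subtracted,
	 * (9000 - 36) / 512 = 17 sectors, i.e. 8704-byte data frames
	 * (capped by the target's advertised ch->scnt); a standard
	 * 1500-byte MTU works out to exactly DEFAULTBCNT (1024).
	 */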

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
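
/* A config response therefore (re)identifies the device: I/O is paused,
 * an ATA identify is sent if the device is idle, and aoecmd_ata_rsp
 * clears DEVFL_PAUSE again when the identify response arrives.
 */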