/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>

#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>
#define DRV_MODULE_NAME		"ldc"
#define PFX			DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

#define LDC_PACKET_SIZE		64
/* Packet header layout for unreliable and reliable mode frames.
 * When in RAW mode, packets are simply straight 64-byte payloads
 * with no headers.
 */
struct ldc_packet {
	u8			type;
#define LDC_CTRL		0x01
#define LDC_DATA		0x02
#define LDC_ERR			0x10

	u8			stype;
#define LDC_INFO		0x01
#define LDC_ACK			0x02
#define LDC_NACK		0x04

	u8			ctrl;
#define LDC_VERS		0x01 /* Link Version		*/
#define LDC_RTS			0x02 /* Request To Send		*/
#define LDC_RTR			0x03 /* Ready To Receive	*/
#define LDC_RDX			0x04 /* Ready for Data eXchange	*/
#define LDC_CTRL_MSK		0x0f

	u8			env;
#define LDC_LEN			0x3f
#define LDC_FRAG_MASK		0xc0
#define LDC_START		0x40
#define LDC_STOP		0x80

	u32			seqid;

	union {
		u8		u_data[LDC_PACKET_SIZE - 8];
		struct {
			u32	stored_size;
			u32	ackid;
			u8	r_data[LDC_PACKET_SIZE - 8 - 8];
		} r;
	} u;
};

struct ldc_version {
	u16 major, minor;
};
/* Ordered from largest major version to lowest. */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};
#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)
struct ldc_mode_ops {
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);
};

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
	/* Protects arena alloc/free. */
	spinlock_t			lock;
	struct iommu_arena		arena;
	struct ldc_mtable_entry		*page_table;
};
struct ldc_channel {
	/* Protects all operations that depend upon channel state. */
	spinlock_t			lock;

	unsigned long			id;

	u8				*mssbuf;
	u32				mssbuf_len;
	u32				mssbuf_off;

	struct ldc_packet		*tx_base;
	unsigned long			tx_head;
	unsigned long			tx_tail;
	unsigned long			tx_num_entries;
	unsigned long			tx_ra;

	unsigned long			tx_acked;

	struct ldc_packet		*rx_base;
	unsigned long			rx_head;
	unsigned long			rx_tail;
	unsigned long			rx_num_entries;
	unsigned long			rx_ra;

	u32				rcv_nxt;
	u32				snd_nxt;

	unsigned long			chan_state;

	struct ldc_channel_config	cfg;
	void				*event_arg;

	const struct ldc_mode_ops	*mops;

	struct ldc_iommu		iommu;

	struct ldc_version		ver;

	u8				hs_state;
#define LDC_HS_CLOSED			0x00
#define LDC_HS_OPEN			0x01
#define LDC_HS_GOTVERS			0x02
#define LDC_HS_SENTRTR			0x03
#define LDC_HS_GOTRTR			0x04
#define LDC_HS_COMPLETE			0x10

	u8				flags;
#define LDC_FLAG_ALLOCED_QUEUES		0x01
#define LDC_FLAG_REGISTERED_QUEUES	0x02
#define LDC_FLAG_REGISTERED_IRQS	0x04
#define LDC_FLAG_RESET			0x10

	u8				mss;
	u8				state;

	struct hlist_head		mh_list;

	struct hlist_node		list;
};
#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
static const char *state_to_str(u8 state)
{
	switch (state) {
	case LDC_STATE_INVALID:
		return "INVALID";
	case LDC_STATE_INIT:
		return "INIT";
	case LDC_STATE_BOUND:
		return "BOUND";
	case LDC_STATE_READY:
		return "READY";
	case LDC_STATE_CONNECTED:
		return "CONNECTED";
	default:
		return "<UNKNOWN>";
	}
}
static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
	off += LDC_PACKET_SIZE;
	if (off == (num_entries * LDC_PACKET_SIZE))
		off = 0;

	return off;
}

static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}

static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}
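/* Editor's illustration, not part of the original driver: a trivial
 * self-check of the ring arithmetic above.  Only __advance() and
 * LDC_PACKET_SIZE, both defined in this file, are assumed.
 */
static void __maybe_unused ldc_ring_advance_selftest(void)
{
	/* Advancing from the last slot of an 8-entry ring wraps to 0. */
	BUG_ON(__advance(7 * LDC_PACKET_SIZE, 8) != 0);

	/* Advancing from any other slot moves one packet forward. */
	BUG_ON(__advance(3 * LDC_PACKET_SIZE, 8) != 4 * LDC_PACKET_SIZE);
}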
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
/* When we are in reliable or stream mode, have to track the next packet
 * we haven't gotten an ACK for in the TX queue using tx_acked.  We have
 * to be careful not to stomp over the queue past that point.  During
 * the handshake, we don't have TX data packets pending in the queue
 * and that's why handshake_get_tx_packet() need not be mindful of
 * lp->tx_acked.
 */
static unsigned long head_for_data(struct ldc_channel *lp)
{
	if (lp->cfg.mode == LDC_MODE_STREAM)
		return lp->tx_acked;
	return lp->tx_head;
}
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;
	mss = lp->mss;

	if (diff * mss < size)
		return 0;

	return 1;
}
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);
	if (t == h)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
		udelay(1);
	}

	lp->tx_tail = orig_tail;
	return -EBUSY;
}
/* This just updates the head value in the hypervisor using
 * a polling loop with a timeout.  The caller takes care of
 * updating software state representing the head change, if any.
 */
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
	int limit = 1000;

	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_rx_set_qhead(lp->id, head);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK)
			return -EINVAL;

		udelay(1);
	}

	return -EBUSY;
}
static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}
static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
						 u8 stype, u8 ctrl,
						 void *data, int dlen,
						 unsigned long *new_tail)
{
	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);

	if (p) {
		memset(p, 0, sizeof(*p));
		p->type = LDC_CTRL;
		p->stype = stype;
		p->ctrl = ctrl;
		if (data)
			memcpy(p->u.u_data, data, dlen);
	}
	return p;
}
static int start_handshake(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	struct ldc_version *ver;
	unsigned long new_tail;

	ver = &ver_arr[0];

	ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
	       ver->major, ver->minor);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   ver, sizeof(*ver), &new_tail);
	if (p) {
		int err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->flags &= ~LDC_FLAG_RESET;
		return err;
	}

	return -EBUSY;
}
static int send_version_nack(struct ldc_channel *lp,
			     u16 major, u16 minor)
{
	struct ldc_packet *p;
	struct ldc_version ver;
	unsigned long new_tail;

	ver.major = major;
	ver.minor = minor;

	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
				   &ver, sizeof(ver), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
		       ver.major, ver.minor);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}
static int send_version_ack(struct ldc_channel *lp,
			    struct ldc_version *vp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
				   vp, sizeof(*vp), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
		       vp->major, vp->minor);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}
static int send_rts(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;
		lp->rcv_nxt = 0;

		ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}
static int send_rtr(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;

		ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}
static int send_rdx(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = 0;
		p->seqid = ++lp->snd_nxt;
		p->u.r.ackid = lp->rcv_nxt;

		ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
		       p->env, p->seqid, p->u.r.ackid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EBUSY;

	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}
static int ldc_abort(struct ldc_channel *lp)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT\n");

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	return -ECONNRESET;
}
static struct ldc_version *find_by_major(u16 major)
{
	struct ldc_version *ret = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
		struct ldc_version *v = &ver_arr[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}
static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	int err;

	ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		lp->hs_state = LDC_HS_OPEN;
		memset(&lp->ver, 0, sizeof(lp->ver));
	}

	vap = find_by_major(vp->major);
	if (!vap) {
		err = send_version_nack(lp, 0, 0);
	} else if (vap->major != vp->major) {
		err = send_version_nack(lp, vap->major, vap->minor);
	} else {
		struct ldc_version ver = *vp;
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		err = send_version_ack(lp, &ver);
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return ldc_abort(lp);

	return 0;
}
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return ldc_abort(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return ldc_abort(lp);
	return 0;
}
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;

	if ((vp->major == 0 && vp->minor == 0) ||
	    !(vap = find_by_major(vp->major))) {
		return ldc_abort(lp);
	} else {
		struct ldc_packet *p;
		unsigned long new_tail;

		p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
					   vap, sizeof(*vap),
					   &new_tail);
		if (p)
			return send_tx_packet(lp, p, new_tail);
		else
			return ldc_abort(lp);
	}
}
static int process_version(struct ldc_channel *lp,
			   struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);

	case LDC_ACK:
		return process_ver_ack(lp, vp);

	case LDC_NACK:
		return process_ver_nack(lp, vp);

	default:
		return ldc_abort(lp);
	}
}
static int process_rts(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return ldc_abort(lp);

	return 0;
}
static int process_rtr(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);
	send_rdx(lp);

	return LDC_EVENT_UP;
}
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}
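/* Editor's note with an illustrative check, not in the original file:
 * rx_seq_ok() relies on well-defined u32 wraparound, so seqid 0 is the
 * valid successor of 0xffffffff.
 */
static void __maybe_unused ldc_seqid_wrap_example(struct ldc_channel *lp)
{
	lp->rcv_nxt = 0xffffffffU;
	BUG_ON(!rx_seq_ok(lp, 0));	/* 0xffffffff + 1 wraps to 0 */
}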
static int process_rdx(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return ldc_abort(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return LDC_EVENT_UP;
}
static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);

	case LDC_RTS:
		return process_rts(lp, p);

	case LDC_RTR:
		return process_rtr(lp, p);

	case LDC_RDX:
		return process_rdx(lp, p);

	default:
		return ldc_abort(lp);
	}
}
static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}
static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return ldc_abort(lp);
	}

	return 0;
}
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}
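/* Editor's sketch of the client-side callback that send_events() above
 * reaches through lp->cfg.event().  The function below is hypothetical;
 * a real client supplies its own handler via ldc_channel_config, and
 * event_arg is whatever that client registered (the channel pointer is
 * only an assumption here).
 */
static void __maybe_unused example_ldc_event(void *arg, int event)
{
	struct ldc_channel *lp = arg;	/* assumes event_arg was the channel */

	if (event == LDC_EVENT_UP)
		printk(KERN_INFO PFX "ID[%lu] link is up\n", lp->id);
	else if (event == LDC_EVENT_RESET)
		printk(KERN_INFO PFX "ID[%lu] link reset\n", lp->id);
}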
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, hv_err, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	if (lp->chan_state != orig_state)
		goto out;

	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			if (err > 0)
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}
		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, hv_err, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);
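/* Editor's sketch (hypothetical, not in the original file) of the
 * serialization the XXX note above asks for: one mutex, taken by
 * ldc_alloc() around the exists-check plus hlist_add_head(), and by
 * ldc_free() around hlist_del().  Declared here for illustration only
 * and not yet used anywhere; would also need <linux/mutex.h>.
 */
static DEFINE_MUTEX(ldc_channel_list_mutex);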
static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;
	struct hlist_node *n;

	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}
static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}
static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)
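/* Editor's sketch of one way to address the XXX above (hypothetical,
 * not in the original): expose the map-table size as a read-only module
 * parameter.  Declared purely as an illustration; ldc_iommu_init()
 * below still uses the LDC_IOTABLE_SIZE constant.
 */
static unsigned long ldc_iotable_size = LDC_IOTABLE_SIZE;
module_param(ldc_iotable_size, ulong, 0444);
MODULE_PARM_DESC(ldc_iotable_size, "Number of LDC IOMMU map table entries");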
static int ldc_iommu_init(struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *iommu = &lp->iommu;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);

	spin_lock_init(&iommu->lock);

	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}

	iommu->arena.limit = num_tsb_entries;

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	iommu->page_table = NULL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->arena.limit;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) iommu->page_table, order);
	iommu->page_table = NULL;

	kfree(iommu->arena.map);
	iommu->arena.map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf = NULL;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	if (!cfgp)
		goto out_err;

	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;

	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;

	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;

	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(lp);
	if (err)
		goto out_free_ldc;

	lp->mops = mops;
	lp->mss = mss;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	return lp;

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);

out_free_ldc:
	kfree(lp);

out_err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
void ldc_free(struct ldc_channel *lp)
{
	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
		free_irq(lp->cfg.rx_irq, lp);
		free_irq(lp->cfg.tx_irq, lp);
	}

	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
		sun4v_ldc_tx_qconf(lp->id, 0, 0);
		sun4v_ldc_rx_qconf(lp->id, 0, 0);
		lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	}
	if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
		free_queue(lp->tx_num_entries, lp->tx_base);
		free_queue(lp->rx_num_entries, lp->rx_base);
		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
	}

	hlist_del(&lp->list);

	kfree(lp->mssbuf);

	ldc_iommu_release(lp);

	kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
/* Bind the channel.  This registers the LDC queues with
 * the hypervisor and puts the channel into a pseudo-listening
 * state.  This does not initiate a handshake, ldc_connect() does
 * that.
 */
int ldc_bind(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->state != LDC_STATE_INIT)
		goto out_err;

	err = request_irq(lp->cfg.rx_irq, ldc_rx,
			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
			  "LDC RX", lp);
	if (err)
		goto out_err;

	err = request_irq(lp->cfg.tx_irq, ldc_tx,
			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
			  "LDC TX", lp);
	if (err)
		goto out_free_rx_irq;

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_tx_irq;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_tx_irq;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_tx_irq:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);

out_free_rx_irq:
	free_irq(lp->cfg.rx_irq, lp);

out_err:
	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);
int ldc_connect(struct ldc_channel *lp)
{
	unsigned long flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
	    lp->hs_state != LDC_HS_OPEN)
		err = -EINVAL;
	else
		err = start_handshake(lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_connect);
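/* Editor's sketch of the intended bring-up sequence for a client
 * (hypothetical helper, not part of this file): allocate, bind, then
 * start the handshake; data flows once LDC_EVENT_UP is delivered to
 * the client's event callback.
 */
static struct ldc_channel * __maybe_unused
example_ldc_bringup(unsigned long id, const struct ldc_channel_config *cfg,
		    void *event_arg)
{
	struct ldc_channel *lp = ldc_alloc(id, cfg, event_arg);

	if (IS_ERR(lp))
		return lp;

	if (ldc_bind(lp) || ldc_connect(lp)) {
		ldc_free(lp);
		return ERR_PTR(-ENODEV);
	}
	return lp;
}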
int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);
int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	if (size > LDC_PACKET_SIZE)
		return -EMSGSIZE;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EAGAIN;

	memcpy(p, buf, size);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		err = size;

	return err;
}

static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}

static const struct ldc_mode_ops raw_ops = {
	.write		=	write_raw,
	.read		=	read_raw,
};
static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	u32 seq;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return ldc_abort(lp);

	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	seq = lp->snd_nxt;
	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;
		p->ctrl = 0;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		p->seqid = ++seq;

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type, p->stype, p->ctrl, p->env, p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	err = set_tx_tail(lp, tail);
	if (!err) {
		lp->snd_nxt = seq;
		err = size;
	}

	return err;
}
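/* Editor's illustration, not in the original, of the env encoding used
 * by write_nonraw() above: a 100-byte unreliable-mode write, where
 * mss == LDC_PACKET_SIZE - 8 == 56, produces two fragments whose length
 * bits sum back to the message size.
 */
static void __maybe_unused example_env_encoding(void)
{
	u8 first = 56 | LDC_START;	/* full-mss fragment opens the message */
	u8 last  = 44 | LDC_STOP;	/* remainder closes it */

	BUG_ON((first & LDC_LEN) + (last & LDC_LEN) != 100);
}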
static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
		      struct ldc_packet *first_frag)
{
	int err;

	if (first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	err = send_data_nack(lp, p);
	if (err)
		return err;

	err = __set_rx_head(lp, lp->rx_tail);
	if (err < 0)
		return ldc_abort(lp);

	return 0;
}
static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
	if (p->stype & LDC_ACK) {
		int err = process_data_ack(lp, p);
		if (err)
			return err;
	}
	if (p->stype & LDC_NACK)
		return ldc_abort(lp);

	return 0;
}
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return ldc_abort(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return -EAGAIN;
}
static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
	int err = __set_rx_head(lp, head);

	if (err < 0)
		return ldc_abort(lp);

	lp->rx_head = head;
	return 0;
}
static void send_data_ack(struct ldc_channel *lp)
{
	unsigned long new_tail;
	struct ldc_packet *p;

	p = data_get_tx_packet(lp, &new_tail);
	if (likely(p)) {
		int err;

		memset(p, 0, sizeof(*p));
		p->type = LDC_DATA;
		p->stype = LDC_ACK;
		p->ctrl = 0;
		p->seqid = lp->snd_nxt + 1;
		p->u.r.ackid = lp->rcv_nxt;

		err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->snd_nxt++;
	}
}
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type, p->stype, p->ctrl, p->env,
		       p->seqid, p->u.r.ackid, lp->rcv_nxt);

		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;

		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL &&  (p->env & LDC_START))) {
			if (!first_frag)
				new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		if (!first_frag)
			first_frag = p;

		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}

		/* Ok, we are gonna eat this one. */
		new = rx_advance(lp, new);

		memcpy(buf,
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		buf += pkt_len;
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);

	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err)
		err = copied;

	if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
		send_data_ack(lp);

	return err;
}
static const struct ldc_mode_ops nonraw_ops = {
	.write		=	write_nonraw,
	.read		=	read_nonraw,
};
static int write_stream(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	if (size > lp->cfg.mtu)
		return -EMSGSIZE;

	return write_nonraw(lp, buf, size);
}

static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}

static const struct ldc_mode_ops stream_ops = {
	.write		=	write_stream,
	.read		=	read_stream,
};
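/* Editor's sketch (hypothetical, not in the original file) of stream-mode
 * behavior from the client side: read_stream() above reassembles one
 * whole packet into lp->mssbuf, and successive short ldc_read() calls
 * drain it.  Assumes the ldc_read() prototype from <asm/ldc.h>.
 */
static void __maybe_unused example_stream_drain(struct ldc_channel *lp)
{
	char chunk[16];
	int n;

	/* Each call returns at most sizeof(chunk) bytes of the buffered
	 * packet; zero or a negative value ends the drain.
	 */
	while ((n = ldc_read(lp, chunk, sizeof(chunk))) > 0)
		;
}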
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->write(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_write);
int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->read(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_read);
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}
static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}

static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;

	*shift = szcode * 3;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}
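/* Editor's illustration, not in the original: round-tripping the cookie
 * encoding above.  Assumes the native 8K base page of sparc64, for
 * which pagesize_code() == 0 and the in-page offset shifts away.
 */
static void __maybe_unused ldc_cookie_roundtrip_example(void)
{
	unsigned long shift;
	u64 cookie = make_cookie(5, pagesize_code(), 0x10);

	BUG_ON(cookie_to_index(cookie, &shift) != 5);
	BUG_ON(shift != 0);
}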
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}
static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}
static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}
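/* Editor's illustration, not in the original: a region shorter than a
 * page still spans two pages when it straddles a page boundary, which
 * is exactly what the cookie-filling code below must account for.
 */
static void __maybe_unused pages_in_region_example(void)
{
	BUG_ON(pages_in_region(0, 16) != 1);
	BUG_ON(pages_in_region(PAGE_SIZE - 8, 16) != 2);
}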
struct cookie_state {
	struct ldc_mtable_entry		*page_table;
	struct ldc_trans_cookie		*cookies;
	u64				mte_base;
	u64				prev_cookie;
	u32				pte_idx;
	u32				nc;
};
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}
static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg->page) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}
static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}
int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for (i = 0; i < num_sg; i++)
		fill_cookies(&state, page_to_pfn(sg[i].page) << PAGE_SHIFT,
			     sg[i].offset, sg[i].length);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc != 1);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
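/* Editor's sketch of a caller of ldc_map_single() (hypothetical helper,
 * not in the original file): export one 8-byte-aligned buffer for
 * shadow copying.  A single cookie is always enough here, per the
 * BUG_ON(state.nc != 1) above.
 */
static int __maybe_unused example_export_buf(struct ldc_channel *lp,
					     void *buf, unsigned int len,
					     struct ldc_trans_cookie *cookie)
{
	return ldc_map_single(lp, buf, len, cookie, 1,
			      LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
}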
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, shift, index, npages;
	struct ldc_mtable_entry *base;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
	index = cookie_to_index(cookie, &shift);
	base = iommu->page_table + index;

	BUG_ON(index > arena->limit ||
	       (index + npages) > arena->limit);

	for (i = 0; i < npages; i++) {
		if (base->mte) {
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->mte);
			base->mte = 0;
		}
		__clear_bit(index + i, arena->map);
		base++;
	}
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);
int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */
	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
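/* Editor's sketch of the caller policy described above (hypothetical,
 * not in the original file): a networking-style receiver that treats a
 * short copy as a runt and rejects it rather than process partial data.
 */
static int __maybe_unused example_copy_in(struct ldc_channel *lp, void *buf,
					  unsigned int len,
					  struct ldc_trans_cookie *cookies,
					  int ncookies)
{
	int copied = ldc_copy(lp, LDC_COPY_IN, buf, len, 0,
			      cookies, ncookies);

	if (copied < 0)
		return copied;		/* hard error from the HV copy */
	if (copied != len)
		return -EINVAL;		/* runt: caller drops the packet */
	return 0;
}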
void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
			  struct ldc_trans_cookie *cookies, int *ncookies,
			  unsigned int map_perm)
{
	void *buf;
	int err;

	if (len & (8UL - 1))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
	if (err < 0) {
		kfree(buf);
		return ERR_PTR(err);
	}
	*ncookies = err;

	return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);
void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
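/* Editor's sketch (hypothetical) pairing the two exported-ring helpers
 * above: allocate a page of descriptors at connect time and release it
 * on teardown with ldc_free_exp_dring().  EXAMPLE_COOKIE_MAX is an
 * assumed caller-side constant, not part of the LDC API.
 */
#define EXAMPLE_COOKIE_MAX	2
static void * __maybe_unused example_setup_dring(struct ldc_channel *lp,
						 struct ldc_trans_cookie *cookies,
						 int *ncookies)
{
	*ncookies = EXAMPLE_COOKIE_MAX;
	return ldc_alloc_exp_dring(lp, PAGE_SIZE, cookies, ncookies,
				   LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
}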
static int __init ldc_init(void)
{
	struct mdesc_node *mp;
	unsigned long major, minor;
	const u64 *v;

	mp = md_find_node_by_name(NULL, "platform");
	if (!mp)
		return -ENODEV;

	v = md_get_property(mp, "domaining-enabled", NULL);
	if (!v)
		return -ENODEV;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		return -ENODEV;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		return 0;
	}
	ldom_domaining_enabled = 1;

	return 0;
}
core_initcall(ldc_init);