/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/iommu-common.h>

#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>

#define DRV_MODULE_NAME		"ldc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"July 22, 2008"

#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

#define LDC_PACKET_SIZE		64

/* Packet header layout for unreliable and reliable mode frames.
 * When in RAW mode, packets are simply straight 64-byte payloads
 * with no headers.
 */
struct ldc_packet {
	u8			type;
#define LDC_CTRL		0x01
#define LDC_DATA		0x02
#define LDC_ERR			0x10

	u8			stype;
#define LDC_INFO		0x01
#define LDC_ACK			0x02
#define LDC_NACK		0x04

	u8			ctrl;
#define LDC_VERS		0x01 /* Link Version		*/
#define LDC_RTS			0x02 /* Request To Send		*/
#define LDC_RTR			0x03 /* Ready To Receive	*/
#define LDC_RDX			0x04 /* Ready for Data eXchange	*/
#define LDC_CTRL_MSK		0x0f

	u8			env;
#define LDC_LEN			0x3f
#define LDC_FRAG_MASK		0xc0
#define LDC_START		0x40
#define LDC_STOP		0x80

	u32			seqid;

	union {
		u8		u_data[LDC_PACKET_SIZE - 8];
		struct {
			u32	pad;
			u32	ackid;
			u8	r_data[LDC_PACKET_SIZE - 8 - 8];
		} r;
	} u;
};
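
/* Effective per-packet payload by mode, derived from the struct above
 * and the mss assignments in ldc_alloc() below: RAW frames use the full
 * 64 bytes, UNRELIABLE frames carry 56 bytes of u_data after the 8-byte
 * header, and STREAM frames carry 48 bytes of r_data once the ackid
 * word is added.
 */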

struct ldc_version {
	u16 major, minor;
};

/* Ordered from largest major to lowest.  */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};

#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)

struct ldc_channel;

struct ldc_mode_ops {
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);
};

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;

int ldom_domaining_enabled;

struct ldc_iommu {
	/* Protects ldc_unmap.  */
	spinlock_t			lock;
	struct ldc_mtable_entry		*page_table;
	struct iommu_map_table		iommu_map_table;
};

struct ldc_channel {
	/* Protects all operations that depend upon channel state.  */
	spinlock_t			lock;

	unsigned long			id;

	u8				*mssbuf;
	u32				mssbuf_len;
	u32				mssbuf_off;

	struct ldc_packet		*tx_base;
	unsigned long			tx_head;
	unsigned long			tx_tail;
	unsigned long			tx_num_entries;
	unsigned long			tx_ra;

	unsigned long			tx_acked;

	struct ldc_packet		*rx_base;
	unsigned long			rx_head;
	unsigned long			rx_tail;
	unsigned long			rx_num_entries;
	unsigned long			rx_ra;

	u32				rcv_nxt;
	u32				snd_nxt;

	unsigned long			chan_state;

	struct ldc_channel_config	cfg;
	void				*event_arg;

	const struct ldc_mode_ops	*mops;

	struct ldc_iommu		iommu;

	struct ldc_version		ver;

	u8				hs_state;
#define LDC_HS_CLOSED			0x00
#define LDC_HS_OPEN			0x01
#define LDC_HS_GOTVERS			0x02
#define LDC_HS_SENTRTR			0x03
#define LDC_HS_GOTRTR			0x04
#define LDC_HS_COMPLETE			0x10

	u8				flags;
#define LDC_FLAG_ALLOCED_QUEUES		0x01
#define LDC_FLAG_REGISTERED_QUEUES	0x02
#define LDC_FLAG_REGISTERED_IRQS	0x04
#define LDC_FLAG_RESET			0x10

	u8				mss;
	u8				state;

#define LDC_IRQ_NAME_MAX		32
	char				rx_irq_name[LDC_IRQ_NAME_MAX];
	char				tx_irq_name[LDC_IRQ_NAME_MAX];

	struct hlist_head		mh_list;

	struct hlist_node		list;
};

#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)

static const char *state_to_str(u8 state)
{
	switch (state) {
	case LDC_STATE_INVALID:
		return "INVALID";
	case LDC_STATE_INIT:
		return "INIT";
	case LDC_STATE_BOUND:
		return "BOUND";
	case LDC_STATE_READY:
		return "READY";
	case LDC_STATE_CONNECTED:
		return "CONNECTED";
	default:
		return "<UNKNOWN>";
	}
}

static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}

static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
	off += LDC_PACKET_SIZE;
	if (off == (num_entries * LDC_PACKET_SIZE))
		off = 0;

	return off;
}
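
/* For example, with the default PAGE_SIZE / 64 entries (128 on an
 * 8K-page sparc64 kernel) the ring is 128 * 64 = 8192 bytes, so an
 * offset of 8128 advances to 8192, matches the ring size, and wraps
 * back to 0.
 */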

static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}

static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}

static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}

/* When we are in reliable or stream mode, have to track the next packet
 * we haven't gotten an ACK for in the TX queue using tx_acked.  We have
 * to be careful not to stomp over the queue past that point.  During
 * the handshake, we don't have TX data packets pending in the queue
 * and that's why handshake_get_tx_packet() need not be mindful of
 * lp->tx_acked.
 */
static unsigned long head_for_data(struct ldc_channel *lp)
{
	if (lp->cfg.mode == LDC_MODE_STREAM)
		return lp->tx_acked;
	return lp->tx_head;
}

static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;
	mss = lp->mss;

	if (diff * mss < size)
		return 0;

	return 1;
}
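
/* For example, in STREAM mode (mss = 48 bytes) with the default
 * 128-entry queue completely idle (head == tail and everything ACKed),
 * diff works out to 127 usable slots, so a single write of up to
 * 127 * 48 = 6096 bytes passes this check.
 */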

static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);
	if (t == h)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}

static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
		udelay(1);
	}

	lp->tx_tail = orig_tail;

	return -EBUSY;
}

/* This just updates the head value in the hypervisor using
 * a polling loop with a timeout.  The caller takes care of
 * updating software state representing the head change, if any.
 */
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
	int limit = 1000;

	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_rx_set_qhead(lp->id, head);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK)
			return -EINVAL;

		udelay(1);
	}

	return -EBUSY;
}

static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}

static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
						 u8 stype, u8 ctrl,
						 void *data, int dlen,
						 unsigned long *new_tail)
{
	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);

	if (p) {
		memset(p, 0, sizeof(*p));
		p->type = LDC_CTRL;
		p->stype = stype;
		p->ctrl = ctrl;
		if (data)
			memcpy(p->u.u_data, data, dlen);
	}
	return p;
}
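
/* Handshake message flow, as implemented by the helpers below:
 *
 *	initiator			peer
 *	---------			----
 *	VERS INFO	-->
 *			<--	VERS ACK (or NACK + counter-proposal)
 *	RTS		-->
 *			<--	RTR
 *	RDX		-->
 *
 * The RTR receiver marks the handshake complete when it sends RDX,
 * and the RDX receiver marks it complete on receipt; after that only
 * data frames flow.
 */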

static int start_handshake(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	struct ldc_version *ver;
	unsigned long new_tail;

	ver = &ver_arr[0];

	ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
	       ver->major, ver->minor);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   ver, sizeof(*ver), &new_tail);
	if (p) {
		int err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->flags &= ~LDC_FLAG_RESET;
		return err;
	}

	return -EBUSY;
}

static int send_version_nack(struct ldc_channel *lp,
			     u16 major, u16 minor)
{
	struct ldc_packet *p;
	struct ldc_version ver;
	unsigned long new_tail;

	ver.major = major;
	ver.minor = minor;

	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
				   &ver, sizeof(ver), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
		       ver.major, ver.minor);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}

static int send_version_ack(struct ldc_channel *lp,
			    struct ldc_version *vp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
				   vp, sizeof(*vp), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
		       vp->major, vp->minor);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}

static int send_rts(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;
		lp->rcv_nxt = 0;

		ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}

static int send_rtr(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;

		ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}

static int send_rdx(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = 0;
		p->seqid = ++lp->snd_nxt;
		p->u.r.ackid = lp->rcv_nxt;

		ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
		       p->env, p->seqid, p->u.r.ackid);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}

static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EBUSY;

	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}

static int ldc_abort(struct ldc_channel *lp)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT\n");

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	return -ECONNRESET;
}

static struct ldc_version *find_by_major(u16 major)
{
	struct ldc_version *ret = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
		struct ldc_version *v = &ver_arr[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	int err;

	ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		lp->hs_state = LDC_HS_OPEN;
		memset(&lp->ver, 0, sizeof(lp->ver));
	}

	vap = find_by_major(vp->major);
	if (!vap) {
		err = send_version_nack(lp, 0, 0);
	} else if (vap->major != vp->major) {
		err = send_version_nack(lp, vap->major, vap->minor);
	} else {
		struct ldc_version ver = *vp;
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		err = send_version_ack(lp, &ver);
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return ldc_abort(lp);

	return 0;
}

static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return ldc_abort(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return ldc_abort(lp);
	return 0;
}

static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	struct ldc_packet *p;
	unsigned long new_tail;

	if (vp->major == 0 && vp->minor == 0)
		return ldc_abort(lp);

	vap = find_by_major(vp->major);
	if (!vap)
		return ldc_abort(lp);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   vap, sizeof(*vap),
				   &new_tail);
	if (!p)
		return ldc_abort(lp);

	return send_tx_packet(lp, p, new_tail);
}

static int process_version(struct ldc_channel *lp,
			   struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);

	case LDC_ACK:
		return process_ver_ack(lp, vp);

	case LDC_NACK:
		return process_ver_nack(lp, vp);

	default:
		return ldc_abort(lp);
	}
}

static int process_rts(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype     != LDC_INFO	   ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env       != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return ldc_abort(lp);

	return 0;
}

static int process_rtr(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env   != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);
	send_rdx(lp);

	return LDC_EVENT_UP;
}

static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}

static int process_rdx(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return ldc_abort(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return LDC_EVENT_UP;
}

static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);

	case LDC_RTS:
		return process_rts(lp, p);

	case LDC_RTR:
		return process_rtr(lp, p);

	case LDC_RDX:
		return process_rdx(lp, p);

	default:
		return ldc_abort(lp);
	}
}

static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}

static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return ldc_abort(lp);
	}

	return 0;
}

static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}

static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_rx_get_state(lp->id,
			       &lp->rx_head,
			       &lp->rx_tail,
			       &lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	if (lp->chan_state != orig_state)
		goto out;

	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			if (err > 0)
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}
		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_tx_get_state(lp->id,
			       &lp->tx_head,
			       &lp->tx_tail,
			       &lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

/* XXX ldc_alloc() and ldc_free() needs to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);

static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;

	hlist_for_each_entry(lp, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}

static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}

static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}

static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
	/* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */

	cookie &= ~COOKIE_PGSZ_CODE;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}
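
/* Cookie layout, as produced by make_cookie() below: bits 63:60 carry
 * the page-size code and the rest is (index << PAGE_SHIFT) | offset.
 * E.g. with 8K pages (code 0, shift 13), table index 5 and offset
 * 0x100 yield cookie 0xa100, and this helper recovers index 5 from it.
 */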

static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
		      unsigned long entry, unsigned long npages)
{
	struct ldc_mtable_entry *base;
	unsigned long i, shift;

	shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
	base = iommu->page_table + entry;
	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		base++;
	}
}

/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)

static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *ldc_iommu = &lp->iommu;
	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	spin_lock_init(&ldc_iommu->lock);

	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}
	iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
			    NULL, false /* no large pool */,
			    1 /* npools */,
			    true /* skip span boundary check */);

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	ldc_iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	ldc_iommu->page_table = NULL;

out_free_map:
	kfree(iommu->map);
	iommu->map = NULL;

	return err;
}

static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *ldc_iommu = &lp->iommu;
	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->poolsize * iommu->nr_pools;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) ldc_iommu->page_table, order);
	ldc_iommu->page_table = NULL;

	kfree(iommu->map);
	iommu->map = NULL;
}

struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg,
			      const char *name)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	if (!cfgp)
		goto out_err;
	if (!name)
		goto out_err;

	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;

	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;

	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;

	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	mssbuf = NULL;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(name, lp);
	if (err)
		goto out_free_ldc;

	lp->mops = mops;
	lp->mss = mss;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);

	err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
			  lp->rx_irq_name, lp);
	if (err)
		goto out_free_txq;

	err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
			  lp->tx_irq_name, lp);
	if (err) {
		free_irq(lp->cfg.rx_irq, lp);
		goto out_free_txq;
	}

	return lp;

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);

out_free_ldc:
	kfree(lp);

out_err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
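
/* A minimal sketch of the intended calling sequence; the vdev/vio
 * names here are illustrative placeholders, not part of this file:
 *
 *	struct ldc_channel_config cfg = {
 *		.event	= vio_event,		(driver callback)
 *		.mode	= LDC_MODE_STREAM,
 *		.rx_irq	= vdev->rx_irq,
 *		.tx_irq	= vdev->tx_irq,
 *	};
 *	struct ldc_channel *lp = ldc_alloc(channel_id, &cfg, vdev, "VIO");
 *
 *	ldc_bind(lp);		(register queues, pseudo-listen)
 *	ldc_connect(lp);	(kick off the handshake)
 *
 *	...then, once the LDC_EVENT_UP callback fires:
 *	ldc_write(lp, buf, len);
 *	ldc_read(lp, buf, sizeof(buf));
 */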

void ldc_unbind(struct ldc_channel *lp)
{
	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
		free_irq(lp->cfg.rx_irq, lp);
		free_irq(lp->cfg.tx_irq, lp);
		lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	}

	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
		sun4v_ldc_tx_qconf(lp->id, 0, 0);
		sun4v_ldc_rx_qconf(lp->id, 0, 0);
		lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	}
	if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
		free_queue(lp->tx_num_entries, lp->tx_base);
		free_queue(lp->rx_num_entries, lp->rx_base);
		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
	}

	ldc_set_state(lp, LDC_STATE_INIT);
}
EXPORT_SYMBOL(ldc_unbind);

void ldc_free(struct ldc_channel *lp)
{
	ldc_unbind(lp);
	hlist_del(&lp->list);
	kfree(lp->mssbuf);
	ldc_iommu_release(lp);

	kfree(lp);
}
EXPORT_SYMBOL(ldc_free);

/* Bind the channel.  This registers the LDC queues with
 * the hypervisor and puts the channel into a pseudo-listening
 * state.  This does not initiate a handshake, ldc_connect() does
 * that.
 */
int ldc_bind(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	if (lp->state != LDC_STATE_INIT)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	enable_irq(lp->cfg.rx_irq);
	enable_irq(lp->cfg.tx_irq);

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_irqs:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);

int ldc_connect(struct ldc_channel *lp)
{
	unsigned long flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
	    lp->hs_state != LDC_HS_OPEN)
		err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
	else
		err = start_handshake(lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_connect);

int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);

int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);

static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	if (size > LDC_PACKET_SIZE)
		return -EMSGSIZE;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EAGAIN;

	memcpy(p, buf, size);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		err = size;

	return err;
}

static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}

static const struct ldc_mode_ops raw_ops = {
	.write		=	write_raw,
	.read		=	read_raw,
};

static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	u32 seq;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return ldc_abort(lp);

	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	seq = lp->snd_nxt;
	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;
		p->ctrl = 0;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		p->seqid = ++seq;

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	err = set_tx_tail(lp, tail);
	if (!err) {
		lp->snd_nxt = seq;
		err = size;
	}

	return err;
}
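
/* For example, a 100-byte write in UNRELIABLE mode (mss = 56) goes out
 * as two frames: the first with env = LDC_START | 56, the second with
 * env = LDC_STOP | 44.  A payload that fits in one frame gets both
 * START and STOP set.
 */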

static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
		      struct ldc_packet *first_frag)
{
	int err;

	if (first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	err = send_data_nack(lp, p);
	if (err)
		return err;

	err = __set_rx_head(lp, lp->rx_tail);
	if (err < 0)
		return ldc_abort(lp);

	return 0;
}

static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
	if (p->stype & LDC_ACK) {
		int err = process_data_ack(lp, p);
		if (err)
			return err;
	}
	if (p->stype & LDC_NACK)
		return ldc_abort(lp);

	return 0;
}

static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return ldc_abort(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return -EAGAIN;
}

static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
	int err = __set_rx_head(lp, head);

	if (err < 0)
		return ldc_abort(lp);

	lp->rx_head = head;
	return 0;
}

static void send_data_ack(struct ldc_channel *lp)
{
	unsigned long new_tail;
	struct ldc_packet *p;

	p = data_get_tx_packet(lp, &new_tail);
	if (likely(p)) {
		int err;

		memset(p, 0, sizeof(*p));
		p->type = LDC_DATA;
		p->stype = LDC_ACK;
		p->ctrl = 0;
		p->seqid = lp->snd_nxt + 1;
		p->u.r.ackid = lp->rcv_nxt;

		err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->snd_nxt++;
	}
}

static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid,
		       p->u.r.ackid,
		       lp->rcv_nxt);

		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;

		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL &&  (p->env & LDC_START))) {
			if (!first_frag)
				new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;

			if (!first_frag)
				goto no_data;
		}
		if (!first_frag)
			first_frag = p;

		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}

		/* Ok, we are gonna eat this one.  */
		new = rx_advance(lp, new);

		memcpy(buf,
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		buf += pkt_len;
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);

	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err)
		err = copied;

	if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
		send_data_ack(lp);

	return err;
}

static const struct ldc_mode_ops nonraw_ops = {
	.write		=	write_nonraw,
	.read		=	read_nonraw,
};

static int write_stream(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	if (size > lp->cfg.mtu)
		size = lp->cfg.mtu;
	return write_nonraw(lp, buf, size);
}

static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}

static const struct ldc_mode_ops stream_ops = {
	.write		=	write_stream,
	.read		=	read_stream,
};

int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->write(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_write);

int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->read(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_read);

static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}

static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}

static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
				      npages, NULL, (unsigned long)-1, 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}
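
/* For example, LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W yields an MTE
 * with the COPY_R and COPY_W bits set on top of the page-size code.
 */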

static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}
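
/* E.g. with 8K pages, a 0x300-byte region starting 0x1f00 bytes into
 * a page crosses one page boundary and counts as 2 pages.
 */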

struct cookie_state {
	struct ldc_mtable_entry		*page_table;
	struct ldc_trans_cookie		*cookies;
	u64				mte_base;
	u64				prev_cookie;
	u32				pte_idx;
	u32				nc;
};

static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}

static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}

static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}

int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;
	struct scatterlist *s;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	base = alloc_npages(iommu, npages);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for_each_sg(sg, s, num_sg, i) {
		fill_cookies(&state, page_to_pfn(sg_page(s)) << PAGE_SHIFT,
			     s->offset, s->length);
	}

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);

int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	base = alloc_npages(iommu, npages);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc > ncookies);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);

static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	unsigned long npages, entry;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;

	entry = ldc_cookie_to_index(cookie, iommu);
	ldc_demap(iommu, id, cookie, entry, npages);
	iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}

void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);

int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */

	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
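
/* A rough sketch of how the map/copy primitives combine; how the
 * cookies travel between peers is up to the protocol built on top
 * and is only assumed here:
 *
 *	struct ldc_trans_cookie cookies[2];
 *	int nc = ldc_map_single(lp, buf, len, cookies, 2,
 *				LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
 *	...send the nc cookies to the peer in a data message...
 *	The peer then moves bytes with:
 *		ldc_copy(peer_lp, LDC_COPY_IN, dst, len, 0, cookies, nc);
 *	and the exporter eventually tears the mapping down with:
 *		ldc_unmap(lp, cookies, nc);
 */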

void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
			  struct ldc_trans_cookie *cookies, int *ncookies,
			  unsigned int map_perm)
{
	void *buf;
	int err;

	if (len & (8UL - 1))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(len, GFP_ATOMIC);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
	if (err < 0) {
		kfree(buf);
		return ERR_PTR(err);
	}
	*ncookies = err;

	return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);

void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);

static int __init ldc_init(void)
{
	unsigned long major, minor;
	struct mdesc_handle *hp;
	const u64 *v;
	int err;
	u64 mp;

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	err = -ENODEV;
	if (mp == MDESC_NODE_NULL)
		goto out;

	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
	if (!v)
		goto out;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		goto out;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		goto out;
	}
	ldom_domaining_enabled = 1;
	err = 0;

out:
	mdesc_release(hp);
	return err;
}

core_initcall(ldc_init);