/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>

#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/mdesc.h>
#define DRV_MODULE_NAME		"ldc"
#define PFX			DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"July 22, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

#define LDC_PACKET_SIZE		64
/* Packet header layout for unreliable and reliable mode frames.
 * When in RAW mode, packets are simply straight 64-byte payloads
 * with no headers.
 */
struct ldc_packet {
	u8			type;
	u8			stype;
	u8			ctrl;
#define LDC_VERS		0x01 /* Link Version		*/
#define LDC_RTS			0x02 /* Request To Send		*/
#define LDC_RTR			0x03 /* Ready To Receive	*/
#define LDC_RDX			0x04 /* Ready for Data eXchange	*/
#define LDC_CTRL_MSK		0x0f

	u8			env;
#define LDC_FRAG_MASK		0xc0
#define LDC_START		0x40

	u32			seqid;

	union {
		u8		u_data[LDC_PACKET_SIZE - 8];
		struct {
			u32	pad;
			u32	ackid;
			u8	r_data[LDC_PACKET_SIZE - 8 - 8];
		} r;
	} u;
};
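
/* Illustration (editor's note, not part of the original driver): the
 * 64-byte packet leaves 56 bytes of payload in unreliable mode (one
 * 8-byte header) and 48 bytes in reliable/stream mode (the header plus
 * the pad/ackid words), matching the mss values chosen in ldc_alloc()
 * below:
 *
 *	RAW:        mss = LDC_PACKET_SIZE;		64 bytes
 *	UNRELIABLE: mss = LDC_PACKET_SIZE - 8;		56 bytes
 *	STREAM:     mss = LDC_PACKET_SIZE - 8 - 8;	48 bytes
 */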
/* Ordered from largest major to lowest. */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};

#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)
struct ldc_mode_ops {
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);
};

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;

int ldom_domaining_enabled;
struct ldc_iommu {
	/* Protects arena alloc/free.  */
	spinlock_t			lock;
	struct iommu_arena		arena;
	struct ldc_mtable_entry		*page_table;
};
struct ldc_channel {
	/* Protects all operations that depend upon channel state.  */
	spinlock_t			lock;

	unsigned long			id;

	u8				*mssbuf;
	u32				mssbuf_len;
	u32				mssbuf_off;

	struct ldc_packet		*tx_base;
	unsigned long			tx_head;
	unsigned long			tx_tail;
	unsigned long			tx_num_entries;
	unsigned long			tx_ra;

	unsigned long			tx_acked;

	struct ldc_packet		*rx_base;
	unsigned long			rx_head;
	unsigned long			rx_tail;
	unsigned long			rx_num_entries;
	unsigned long			rx_ra;

	u32				rcv_nxt;
	u32				snd_nxt;

	unsigned long			chan_state;

	struct ldc_channel_config	cfg;
	void				*event_arg;

	const struct ldc_mode_ops	*mops;

	struct ldc_iommu		iommu;

	struct ldc_version		ver;

	u8				hs_state;
#define LDC_HS_CLOSED			0x00
#define LDC_HS_OPEN			0x01
#define LDC_HS_GOTVERS			0x02
#define LDC_HS_SENTRTR			0x03
#define LDC_HS_GOTRTR			0x04
#define LDC_HS_COMPLETE			0x10

	u8				flags;
#define LDC_FLAG_ALLOCED_QUEUES		0x01
#define LDC_FLAG_REGISTERED_QUEUES	0x02
#define LDC_FLAG_REGISTERED_IRQS	0x04
#define LDC_FLAG_RESET			0x10

	u8				mss;
	u8				state;

#define LDC_IRQ_NAME_MAX		32
	char				rx_irq_name[LDC_IRQ_NAME_MAX];
	char				tx_irq_name[LDC_IRQ_NAME_MAX];

	struct hlist_head		mh_list;

	struct hlist_node		list;
};
#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
static const char *state_to_str(u8 state)
{
	switch (state) {
	case LDC_STATE_INVALID:
		return "INVALID";
	case LDC_STATE_BOUND:
		return "BOUND";
	case LDC_STATE_READY:
		return "READY";
	case LDC_STATE_CONNECTED:
		return "CONNECTED";
	default:
		return "<UNKNOWN>";
	}
}
static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
	off += LDC_PACKET_SIZE;
	if (off == (num_entries * LDC_PACKET_SIZE))
		off = 0;

	return off;
}

static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}

static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}
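
/* Example (editor's sketch): on an 8K-page kernel, LDC_DEFAULT_NUM_ENTRIES
 * is 8192 / 64 == 128, the queue spans 8192 bytes, and the advance helpers
 * step head/tail byte offsets in 64-byte units:
 *
 *	tx_advance(lp, 0)    == 64
 *	tx_advance(lp, 8128) == 0	(wraps at 128 * 64 == 8192)
 */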
static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
/* When we are in reliable or stream mode, have to track the next packet
 * we haven't gotten an ACK for in the TX queue using tx_acked.  We have
 * to be careful not to stomp over the queue past that point.  During
 * the handshake, we don't have TX data packets pending in the queue
 * and that's why handshake_get_tx_packet() need not be mindful of
 * lp->tx_acked.
 */
static unsigned long head_for_data(struct ldc_channel *lp)
{
	if (lp->cfg.mode == LDC_MODE_STREAM)
		return lp->tx_acked;
	return lp->tx_head;
}
static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;
	mss = lp->mss;

	if (diff * mss < size)
		return 0;

	return 1;
}
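
/* Worked example (editor's sketch): with 128 queue entries, limit == 0 and
 * tail == 0x1f80 (entry 126), new_tail advances to 0x1fc0 (entry 127).
 * Since limit < new_tail, diff = 0 + (128 * 64 - 0x1fc0) = 64 bytes, i.e.
 * one free packet, so a stream-mode write (48-byte mss) of up to 48 bytes
 * still fits.
 */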
static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);
	if (t == h)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
		udelay(1);
	}

	lp->tx_tail = orig_tail;
	return -EBUSY;
}
/* This just updates the head value in the hypervisor using
 * a polling loop with a timeout.  The caller takes care of
 * updating the software state representing the head change, if any.
 */
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
	int limit = 1000;

	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_rx_set_qhead(lp->id, head);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK)
			return -EINVAL;

		udelay(1);
	}

	return -EBUSY;
}
static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}
static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
						 u8 stype, u8 ctrl,
						 void *data, int dlen,
						 unsigned long *new_tail)
{
	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);

	if (p) {
		memset(p, 0, sizeof(*p));
		p->type = LDC_CTRL;
		p->stype = stype;
		p->ctrl = ctrl;
		if (data)
			memcpy(p->u.u_data, data, dlen);
	}
	return p;
}
static int start_handshake(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	struct ldc_version *ver;
	unsigned long new_tail;

	ver = &ver_arr[0];

	ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
	       ver->major, ver->minor);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   ver, sizeof(*ver), &new_tail);
	if (p) {
		int err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->flags &= ~LDC_FLAG_RESET;
		return err;
	}

	return -EBUSY;
}
static int send_version_nack(struct ldc_channel *lp,
			     u16 major, u16 minor)
{
	struct ldc_packet *p;
	struct ldc_version ver;
	unsigned long new_tail;

	ver.major = major;
	ver.minor = minor;

	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
				   &ver, sizeof(ver), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
		       ver.major, ver.minor);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}
static int send_version_ack(struct ldc_channel *lp,
			    struct ldc_version *vp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
				   vp, sizeof(*vp), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
		       vp->major, vp->minor);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}
static int send_rts(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;
		lp->rcv_nxt = 0;

		ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}
static int send_rtr(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;

		ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}
static int send_rdx(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = 0;
		p->seqid = ++lp->snd_nxt;
		p->u.r.ackid = lp->rcv_nxt;

		ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
		       p->env, p->seqid, p->u.r.ackid);

		return send_tx_packet(lp, p, new_tail);
	}

	return -EBUSY;
}
static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -ENOMEM;

	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}
static int ldc_abort(struct ldc_channel *lp)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT\n");

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	return -ECONNRESET;
}
static struct ldc_version *find_by_major(u16 major)
{
	struct ldc_version *ret = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
		struct ldc_version *v = &ver_arr[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}
static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	int err;

	ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		lp->hs_state = LDC_HS_OPEN;
		memset(&lp->ver, 0, sizeof(lp->ver));
	}

	vap = find_by_major(vp->major);
	if (!vap) {
		err = send_version_nack(lp, 0, 0);
	} else if (vap->major != vp->major) {
		err = send_version_nack(lp, vap->major, vap->minor);
	} else {
		struct ldc_version ver = *vp;
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		err = send_version_ack(lp, &ver);
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return ldc_abort(lp);

	return 0;
}
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return ldc_abort(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return ldc_abort(lp);
	return 0;
}
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	struct ldc_packet *p;
	unsigned long new_tail;

	if (vp->major == 0 && vp->minor == 0)
		return ldc_abort(lp);

	vap = find_by_major(vp->major);
	if (!vap)
		return ldc_abort(lp);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   vap, sizeof(*vap),
				   &new_tail);
	if (!p)
		return ldc_abort(lp);

	return send_tx_packet(lp, p, new_tail);
}
static int process_version(struct ldc_channel *lp,
			   struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);

	case LDC_ACK:
		return process_ver_ack(lp, vp);

	case LDC_NACK:
		return process_ver_nack(lp, vp);

	default:
		return ldc_abort(lp);
	}
}
static int process_rts(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO	   ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return ldc_abort(lp);

	return 0;
}
static int process_rtr(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);
	send_rdx(lp);

	return LDC_EVENT_UP;
}
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}
static int process_rdx(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return ldc_abort(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return LDC_EVENT_UP;
}
static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);

	case LDC_RTS:
		return process_rts(lp, p);

	case LDC_RTR:
		return process_rtr(lp, p);

	case LDC_RDX:
		return process_rdx(lp, p);

	default:
		return ldc_abort(lp);
	}
}
static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}
static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return ldc_abort(lp);
	}

	return 0;
}
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, hv_err, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	if (lp->chan_state != orig_state)
		goto out;

	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			if (err > 0)
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}
		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, hv_err, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);

static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;
	struct hlist_node *n;

	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}
static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}

static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)

static int ldc_iommu_init(struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *iommu = &lp->iommu;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);

	spin_lock_init(&iommu->lock);

	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}

	iommu->arena.limit = num_tsb_entries;

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	iommu->page_table = NULL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->arena.limit;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) iommu->page_table, order);
	iommu->page_table = NULL;

	kfree(iommu->arena.map);
	iommu->arena.map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	if (!cfgp)
		goto out_err;

	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;

	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;

	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;

	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	mssbuf = NULL;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(lp);
	if (err)
		goto out_free_ldc;

	lp->mss = mss;
	lp->mops = mops;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	return lp;

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);

out_free_ldc:
	kfree(lp);

out_err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);
void ldc_free(struct ldc_channel *lp)
{
	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
		free_irq(lp->cfg.rx_irq, lp);
		free_irq(lp->cfg.tx_irq, lp);
	}

	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
		sun4v_ldc_tx_qconf(lp->id, 0, 0);
		sun4v_ldc_rx_qconf(lp->id, 0, 0);
		lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	}
	if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
		free_queue(lp->tx_num_entries, lp->tx_base);
		free_queue(lp->rx_num_entries, lp->rx_base);
		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
	}

	hlist_del(&lp->list);

	kfree(lp->mssbuf);

	ldc_iommu_release(lp);

	kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
/* Bind the channel.  This registers the LDC queues with
 * the hypervisor and puts the channel into a pseudo-listening
 * state.  This does not initiate a handshake, ldc_connect() does
 * that.
 */
int ldc_bind(struct ldc_channel *lp, const char *name)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	if (!name ||
	    (lp->state != LDC_STATE_INIT))
		return -EINVAL;

	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);

	err = request_irq(lp->cfg.rx_irq, ldc_rx,
			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
			  lp->rx_irq_name, lp);
	if (err)
		return err;

	err = request_irq(lp->cfg.tx_irq, ldc_tx,
			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
			  lp->tx_irq_name, lp);
	if (err) {
		free_irq(lp->cfg.rx_irq, lp);
		return err;
	}

	spin_lock_irqsave(&lp->lock, flags);

	enable_irq(lp->cfg.rx_irq);
	enable_irq(lp->cfg.tx_irq);

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_irqs:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);
int ldc_connect(struct ldc_channel *lp)
{
	unsigned long flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
	    lp->hs_state != LDC_HS_OPEN)
		err = -EINVAL;
	else
		err = start_handshake(lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_connect);
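
/* Typical client usage (editor's sketch, not part of this file; vio_event,
 * VIO_CHANNEL_ID and driver_priv are hypothetical names, while the config
 * fields are the ones this file reads: .event, .mode, .rx_irq, .tx_irq):
 *
 *	struct ldc_channel_config cfg = {
 *		.event	= vio_event,		// driver callback
 *		.mode	= LDC_MODE_STREAM,
 *		.rx_irq	= rx_ino,
 *		.tx_irq	= tx_ino,
 *	};
 *	struct ldc_channel *lp = ldc_alloc(VIO_CHANNEL_ID, &cfg, driver_priv);
 *
 *	if (!IS_ERR(lp) && !ldc_bind(lp, "VIO") && !ldc_connect(lp))
 *		... wait for LDC_EVENT_UP in vio_event() ...
 */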
int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);
int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	if (size > LDC_PACKET_SIZE)
		return -EMSGSIZE;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EAGAIN;

	memcpy(p, buf, size);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		err = size;

	return err;
}

static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}

static const struct ldc_mode_ops raw_ops = {
	.write		=	write_raw,
	.read		=	read_raw,
};
static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	u32 seq;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return ldc_abort(lp);

	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	seq = lp->snd_nxt;
	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;
		p->ctrl = 0;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		p->seqid = ++seq;

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	err = set_tx_tail(lp, tail);
	if (!err) {
		lp->snd_nxt = seq;
		err = size;
	}

	return err;
}
*lp
, struct ldc_packet
*p
,
1537 struct ldc_packet
*first_frag
)
1542 lp
->rcv_nxt
= first_frag
->seqid
- 1;
1544 err
= send_data_nack(lp
, p
);
1548 err
= __set_rx_head(lp
, lp
->rx_tail
);
1550 return ldc_abort(lp
);
static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
	if (p->stype & LDC_ACK) {
		int err = process_data_ack(lp, p);
		if (err)
			return err;
	}
	if (p->stype & LDC_NACK)
		return ldc_abort(lp);

	return 0;
}
static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return ldc_abort(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return -EAGAIN;
}
static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
	int err = __set_rx_head(lp, head);

	if (err < 0)
		return ldc_abort(lp);

	lp->rx_head = head;
	return 0;
}
static void send_data_ack(struct ldc_channel *lp)
{
	unsigned long new_tail;
	struct ldc_packet *p;

	p = data_get_tx_packet(lp, &new_tail);
	if (likely(p)) {
		int err;

		memset(p, 0, sizeof(*p));
		p->type = LDC_DATA;
		p->stype = LDC_ACK;
		p->ctrl = 0;
		p->seqid = lp->snd_nxt + 1;
		p->u.r.ackid = lp->rcv_nxt;

		err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->snd_nxt++;
	}
}
static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid,
		       p->u.r.ackid,
		       lp->rcv_nxt);

		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;

		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL &&  (p->env & LDC_START))) {
			if (!first_frag)
				new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		if (!first_frag)
			first_frag = p;

		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}

		/* Ok, we are gonna eat this one.  */
		new = rx_advance(lp, new);

		memcpy(buf,
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		buf += pkt_len;
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);

	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err)
		err = copied;

	if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
		send_data_ack(lp);

	return err;
}
static const struct ldc_mode_ops nonraw_ops = {
	.write		=	write_nonraw,
	.read		=	read_nonraw,
};
static int write_stream(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	if (size > lp->cfg.mtu)
		size = lp->cfg.mtu;
	return write_nonraw(lp, buf, size);
}
static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}
static const struct ldc_mode_ops stream_ops = {
	.write		=	write_stream,
	.read		=	read_stream,
};
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->write(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_write);
int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->read(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_read);
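
/* Consumer sketch (editor's example; my_port and handle_packet() are
 * hypothetical): on LDC_EVENT_DATA_READY, drain the channel until
 * ldc_read() returns zero.
 *
 *	static void vio_event(void *arg, unsigned int event)
 *	{
 *		struct my_port *port = arg;
 *		char pkt[64];
 *		int len;
 *
 *		if (event != LDC_EVENT_DATA_READY)
 *			return;
 *		while ((len = ldc_read(port->lp, pkt, sizeof(pkt))) > 0)
 *			handle_packet(port, pkt, len);
 *	}
 */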
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}
static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}

static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;

	*shift = szcode * 3;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}
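
/* Worked example (editor's note): on an 8K-page kernel (PAGE_SHIFT == 13,
 * pagesize_code() == 0), mapping table entry 5 at byte offset 0x100 yields
 *
 *	make_cookie(5, 0, 0x100) == (5 << 13) | 0x100 == 0xa100
 *
 * and cookie_to_index(0xa100, &shift) recovers index 5 with shift == 0.
 */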
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}
static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}
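
/* Example (editor's note): perm_to_mte(LDC_MAP_DIRECT | LDC_MAP_R | LDC_MAP_W)
 * returns pagesize_code() with LDC_MTE_READ and LDC_MTE_WRITE or'd in, i.e.
 * a direct-mapped read-write entry with no copy or IOMMU access bits set.
 */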
static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}
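
/* Example (editor's note): with 8K pages, a 0x300-byte region starting at
 * base == 0x11f00 crosses one page boundary, so pages_in_region() returns 2;
 * the first iteration consumes the 0x100 bytes up to 0x12000 and the second
 * the remaining 0x200 bytes.
 */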
struct cookie_state {
	struct ldc_mtable_entry		*page_table;
	struct ldc_trans_cookie		*cookies;
	u64				mte_base;
	u64				prev_cookie;
	u32				pte_idx;
	u32				nc;
};
static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}
static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}

static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}
int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for (i = 0; i < num_sg; i++)
		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
			     sg[i].offset, sg[i].length);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc != 1);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, shift, index, npages;
	struct ldc_mtable_entry *base;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
	index = cookie_to_index(cookie, &shift);
	base = iommu->page_table + index;

	BUG_ON(index > arena->limit ||
	       (index + npages) > arena->limit);

	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		__clear_bit(index + i, arena->map);
		base++;
	}
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);
int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */
	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
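
/* Caller-side sketch (editor's example; desc and rlen are hypothetical):
 * following the policy note above, treat a short copy as a runt and drop it.
 *
 *	int len = ldc_copy(lp, LDC_COPY_IN, skb->data, rlen, 0,
 *			   desc->cookies, desc->ncookies);
 *	if (len < 0 || len < rlen)
 *		return -EINVAL;		// runt or error: drop the packet
 */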
void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
			  struct ldc_trans_cookie *cookies, int *ncookies,
			  unsigned int map_perm)
{
	void *buf;
	int err;

	if (len & (8UL - 1))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
	if (err < 0) {
		kfree(buf);
		return ERR_PTR(err);
	}
	*ncookies = err;

	return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);

void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);
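
/* Usage sketch (editor's example): export a descriptor ring to the peer,
 * then release it on teardown.  Sizes and the cookie count are illustrative.
 *
 *	struct ldc_trans_cookie cookies[2];
 *	int ncookies = ARRAY_SIZE(cookies);
 *	void *dring = ldc_alloc_exp_dring(lp, 8192, cookies, &ncookies,
 *					  LDC_MAP_SHADOW |
 *					  LDC_MAP_R | LDC_MAP_W);
 *
 *	if (!IS_ERR(dring)) {
 *		... hand cookies[0 .. ncookies-1] to the peer ...
 *		ldc_free_exp_dring(lp, dring, 8192, cookies, ncookies);
 *	}
 */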
static int __init ldc_init(void)
{
	unsigned long major, minor;
	struct mdesc_handle *hp;
	const u64 *v;
	int err;
	u64 mp;

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	err = -ENODEV;
	if (mp == MDESC_NODE_NULL)
		goto out;

	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
	if (!v)
		goto out;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		goto out;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		goto out;
	}
	ldom_domaining_enabled = 1;
	err = 0;

out:
	mdesc_release(hp);
	return err;
}

core_initcall(ldc_init);