/* ds.c: Domain Services driver for Logical Domains
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>

#include <asm/hypervisor.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>

#define DRV_MODULE_NAME		"ds"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Jul 11, 2007"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

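/* Domain Services protocol framing: every message begins with a
 * ds_msg_tag whose 'len' counts only the bytes that follow the tag,
 * which is why senders below set it to sizeof(pkt) - sizeof(struct ds_msg_tag).
 */
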
struct ds_msg_tag {
        __u32 type;
#define DS_INIT_REQ		0x00
#define DS_INIT_ACK		0x01
#define DS_INIT_NACK		0x02
#define DS_REG_REQ		0x03
#define DS_REG_ACK		0x04
#define DS_REG_NACK		0x05
#define DS_UNREG_REQ		0x06
#define DS_UNREG_ACK		0x07
#define DS_UNREG_NACK		0x08
#define DS_DATA			0x09
#define DS_NACK			0x0a

        __u32 len;
};

/* Result codes */
#define DS_OK			0x00
#define DS_REG_VER_NACK		0x01
#define DS_REG_DUP		0x02
#define DS_INV_HDL		0x03
#define DS_TYPE_UNKNOWN		0x04

struct ds_version {
        __u16 major;
        __u16 minor;
};

struct ds_ver_req {
        struct ds_msg_tag tag;
        struct ds_version ver;
};

struct ds_ver_ack {
        struct ds_msg_tag tag;
        __u16 minor;
};

struct ds_ver_nack {
        struct ds_msg_tag tag;
        __u16 major;
};

struct ds_reg_req {
        struct ds_msg_tag tag;
        __u64 handle;
        __u16 major;
        __u16 minor;
        char svc_id[0];
};

struct ds_reg_ack {
        struct ds_msg_tag tag;
        __u64 handle;
        __u16 minor;
};

struct ds_reg_nack {
        struct ds_msg_tag tag;
        __u64 handle;
        __u16 major;
};

struct ds_unreg_req {
        struct ds_msg_tag tag;
        __u64 handle;
};

struct ds_unreg_ack {
        struct ds_msg_tag tag;
        __u64 handle;
};

struct ds_unreg_nack {
        struct ds_msg_tag tag;
        __u64 handle;
};

struct ds_data {
        struct ds_msg_tag tag;
        __u64 handle;
};

struct ds_data_nack {
        struct ds_msg_tag tag;
        __u64 handle;
        __u64 result;
};

struct ds_info;

struct ds_cap_state {
        __u64 handle;

        void (*data)(struct ds_info *dp,
                     struct ds_cap_state *cp,
                     void *buf, int len);

        const char *service_id;

        u8 state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};

static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
                           void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
                                 struct ds_cap_state *cp,
                                 void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
                              struct ds_cap_state *cp,
                              void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
                        struct ds_cap_state *cp,
                        void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
                        struct ds_cap_state *cp,
                        void *buf, int len);
static void ds_var_data(struct ds_info *dp,
                        struct ds_cap_state *cp,
                        void *buf, int len);

static struct ds_cap_state ds_states_template[] = {
        {
                .service_id	= "md-update",
                .data		= md_update_data,
        },
        {
                .service_id	= "domain-shutdown",
                .data		= domain_shutdown_data,
        },
        {
                .service_id	= "domain-panic",
                .data		= domain_panic_data,
        },
#ifdef CONFIG_HOTPLUG_CPU
        {
                .service_id	= "dr-cpu",
                .data		= dr_cpu_data,
        },
#endif
        {
                .service_id	= "pri",
                .data		= ds_pri_data,
        },
        {
                .service_id	= "var-config",
                .data		= ds_var_data,
        },
        {
                .service_id	= "var-config-backup",
                .data		= ds_var_data,
        },
};

static DEFINE_SPINLOCK(ds_lock);

struct ds_info {
        struct ldc_channel *lp;
        u8 hs_state;
#define DS_HS_START		0x01
#define DS_HS_DONE		0x02

        u64 id;

        void *rcv_buf;
        int rcv_buf_len;

        struct ds_cap_state *ds_states;
        int num_ds_states;

        struct ds_info *next;
};

static struct ds_info *ds_info_list;

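/* A service handle encodes the capability's index in ds_states[] in its
 * upper 32 bits (ds_probe() and register_services() both build handles
 * from ((u64)i << 32)); register_services() additionally fills the low
 * 32 bits with a nonce taken from sched_clock().
 */
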
static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
        unsigned int index = handle >> 32;

        if (index >= dp->num_ds_states)
                return NULL;
        return &dp->ds_states[index];
}

static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
                                               const char *name)
{
        int i;

        for (i = 0; i < dp->num_ds_states; i++) {
                if (strcmp(dp->ds_states[i].service_id, name))
                        continue;

                return &dp->ds_states[i];
        }
        return NULL;
}

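/* __ds_send() retries ldc_write() on -EAGAIN (up to 1000 attempts) and
 * expects ds_lock to be held by the caller; ds_send() is the locking
 * wrapper used by the service handlers.
 */
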
static int __ds_send(struct ldc_channel *lp, void *data, int len)
{
        int err, limit = 1000;

        err = -EINVAL;
        while (limit-- > 0) {
                err = ldc_write(lp, data, len);
                if (!err || (err != -EAGAIN))
                        break;
                udelay(1);
        }

        return err;
}

static int ds_send(struct ldc_channel *lp, void *data, int len)
{
        unsigned long flags;
        int err;

        spin_lock_irqsave(&ds_lock, flags);
        err = __ds_send(lp, data, len);
        spin_unlock_irqrestore(&ds_lock, flags);

        return err;
}

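/* The md-update, domain-shutdown and domain-panic handlers below all
 * acknowledge a request the same way: a DS_DATA envelope carrying the
 * capability handle, the echoed req_num and a DS_OK result.
 */
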
struct ds_md_update_req {
        __u64 req_num;
};

struct ds_md_update_res {
        __u64 req_num;
        __u32 result;
};

static void md_update_data(struct ds_info *dp,
                           struct ds_cap_state *cp,
                           void *buf, int len)
{
        struct ldc_channel *lp = dp->lp;
        struct ds_data *dpkt = buf;
        struct ds_md_update_req *rp;
        struct {
                struct ds_data data;
                struct ds_md_update_res res;
        } pkt;

        rp = (struct ds_md_update_req *) (dpkt + 1);

        printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);

        mdesc_update();

        memset(&pkt, 0, sizeof(pkt));
        pkt.data.tag.type = DS_DATA;
        pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
        pkt.data.handle = cp->handle;
        pkt.res.req_num = rp->req_num;
        pkt.res.result = DS_OK;

        ds_send(lp, &pkt, sizeof(pkt));
}

struct ds_shutdown_req {
        __u64 req_num;
        __u32 ms_delay;
};

struct ds_shutdown_res {
        __u64 req_num;
        __u32 result;
        char reason[1];
};

static void domain_shutdown_data(struct ds_info *dp,
                                 struct ds_cap_state *cp,
                                 void *buf, int len)
{
        struct ldc_channel *lp = dp->lp;
        struct ds_data *dpkt = buf;
        struct ds_shutdown_req *rp;
        struct {
                struct ds_data data;
                struct ds_shutdown_res res;
        } pkt;

        rp = (struct ds_shutdown_req *) (dpkt + 1);

        printk(KERN_ALERT "ds-%llu: Shutdown request from "
               "LDOM manager received.\n", dp->id);

        memset(&pkt, 0, sizeof(pkt));
        pkt.data.tag.type = DS_DATA;
        pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
        pkt.data.handle = cp->handle;
        pkt.res.req_num = rp->req_num;
        pkt.res.result = DS_OK;
        pkt.res.reason[0] = 0;

        ds_send(lp, &pkt, sizeof(pkt));

        orderly_poweroff(true);
}

struct ds_panic_req {
        __u64 req_num;
};

struct ds_panic_res {
        __u64 req_num;
        __u32 result;
        char reason[1];
};

static void domain_panic_data(struct ds_info *dp,
                              struct ds_cap_state *cp,
                              void *buf, int len)
{
        struct ldc_channel *lp = dp->lp;
        struct ds_data *dpkt = buf;
        struct ds_panic_req *rp;
        struct {
                struct ds_data data;
                struct ds_panic_res res;
        } pkt;

        rp = (struct ds_panic_req *) (dpkt + 1);

        printk(KERN_ALERT "ds-%llu: Panic request from "
               "LDOM manager received.\n", dp->id);

        memset(&pkt, 0, sizeof(pkt));
        pkt.data.tag.type = DS_DATA;
        pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
        pkt.data.handle = cp->handle;
        pkt.res.req_num = rp->req_num;
        pkt.res.result = DS_OK;
        pkt.res.reason[0] = 0;

        ds_send(lp, &pkt, sizeof(pkt));

        panic("PANIC requested by LDOM manager.");
}

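/* CPU dynamic reconfiguration (dr-cpu) service.  A request is a
 * dr_cpu_tag followed by tag->num_records u32 cpu ids; the response
 * carries one dr_cpu_resp_entry per cpu (see dr_cpu_size_response()).
 */
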
#ifdef CONFIG_HOTPLUG_CPU
struct dr_cpu_tag {
        __u64 req_num;
        __u32 type;
#define DR_CPU_CONFIGURE		0x43
#define DR_CPU_UNCONFIGURE		0x55
#define DR_CPU_FORCE_UNCONFIGURE	0x46
#define DR_CPU_STATUS			0x53

/* Response types */
#define DR_CPU_OK			0x6f
#define DR_CPU_ERROR			0x65

        __u32 num_records;
};

struct dr_cpu_resp_entry {
        __u32 cpu;
        __u32 result;
#define DR_CPU_RES_OK			0x00
#define DR_CPU_RES_FAILURE		0x01
#define DR_CPU_RES_BLOCKED		0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING	0x03
#define DR_CPU_RES_NOT_IN_MD		0x04

        __u32 stat;
#define DR_CPU_STAT_NOT_PRESENT		0x00
#define DR_CPU_STAT_UNCONFIGURED	0x01
#define DR_CPU_STAT_CONFIGURED		0x02

        __u32 str_off;
};

static void __dr_cpu_send_error(struct ds_info *dp,
                                struct ds_cap_state *cp,
                                struct ds_data *data)
{
        struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
        struct {
                struct ds_data data;
                struct dr_cpu_tag tag;
        } pkt;
        int msg_len;

        memset(&pkt, 0, sizeof(pkt));
        pkt.data.tag.type = DS_DATA;
        pkt.data.handle = cp->handle;
        pkt.tag.req_num = tag->req_num;
        pkt.tag.type = DR_CPU_ERROR;
        pkt.tag.num_records = 0;

        msg_len = (sizeof(struct ds_data) +
                   sizeof(struct dr_cpu_tag));

        pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

        __ds_send(dp->lp, &pkt, msg_len);
}

static void dr_cpu_send_error(struct ds_info *dp,
                              struct ds_cap_state *cp,
                              struct ds_data *data)
{
        unsigned long flags;

        spin_lock_irqsave(&ds_lock, flags);
        __dr_cpu_send_error(dp, cp, data);
        spin_unlock_irqrestore(&ds_lock, flags);
}

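/* Duplicate cpu ids in a request are overwritten with CPU_SENTINEL so
 * that the mask-building loop in dr_cpu_data() simply skips them.
 */
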
#define CPU_SENTINEL	0xffffffff

static void purge_dups(u32 *list, u32 num_ents)
{
        unsigned int i;

        for (i = 0; i < num_ents; i++) {
                u32 cpu = list[i];
                unsigned int j;

                if (cpu == CPU_SENTINEL)
                        continue;

                for (j = i + 1; j < num_ents; j++) {
                        if (list[j] == cpu)
                                list[j] = CPU_SENTINEL;
                }
        }
}

static int dr_cpu_size_response(int ncpus)
{
        return (sizeof(struct ds_data) +
                sizeof(struct dr_cpu_tag) +
                (sizeof(struct dr_cpu_resp_entry) * ncpus));
}

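/* dr_cpu_init_response() pre-fills every entry as DR_CPU_RES_OK with
 * the given default status; dr_cpu_mark() then overrides the entry of
 * any cpu whose bring-up or shutdown failed.
 */
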
static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
                                 u64 handle, int resp_len, int ncpus,
                                 cpumask_t *mask, u32 default_stat)
{
        struct dr_cpu_resp_entry *ent;
        struct dr_cpu_tag *tag;
        int i, cpu;

        tag = (struct dr_cpu_tag *) (resp + 1);
        ent = (struct dr_cpu_resp_entry *) (tag + 1);

        resp->tag.type = DS_DATA;
        resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
        resp->handle = handle;
        tag->req_num = req_num;
        tag->type = DR_CPU_OK;
        tag->num_records = ncpus;

        i = 0;
        for_each_cpu(cpu, mask) {
                ent[i].cpu = cpu;
                ent[i].result = DR_CPU_RES_OK;
                ent[i].stat = default_stat;
                i++;
        }
        BUG_ON(i != ncpus);
}

static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
                        u32 res, u32 stat)
{
        struct dr_cpu_resp_entry *ent;
        struct dr_cpu_tag *tag;
        int i;

        tag = (struct dr_cpu_tag *) (resp + 1);
        ent = (struct dr_cpu_resp_entry *) (tag + 1);

        for (i = 0; i < ncpus; i++) {
                if (ent[i].cpu != cpu)
                        continue;
                ent[i].result = res;
                ent[i].stat = stat;
                break;
        }
}

static int __cpuinit dr_cpu_configure(struct ds_info *dp,
                                      struct ds_cap_state *cp,
                                      u64 req_num,
                                      cpumask_t *mask)
{
        struct ds_data *resp;
        int resp_len, ncpus, cpu;
        unsigned long flags;

        ncpus = cpumask_weight(mask);
        resp_len = dr_cpu_size_response(ncpus);
        resp = kzalloc(resp_len, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        dr_cpu_init_response(resp, req_num, cp->handle,
                             resp_len, ncpus, mask,
                             DR_CPU_STAT_CONFIGURED);

        mdesc_populate_present_mask(mask);
        mdesc_fill_in_cpu_data(mask);

        for_each_cpu(cpu, mask) {
                int err;

                printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
                       dp->id, cpu);
                err = cpu_up(cpu);
                if (err) {
                        __u32 res = DR_CPU_RES_FAILURE;
                        __u32 stat = DR_CPU_STAT_UNCONFIGURED;

                        if (!cpu_present(cpu)) {
                                /* CPU not present in MD */
                                res = DR_CPU_RES_NOT_IN_MD;
                                stat = DR_CPU_STAT_NOT_PRESENT;
                        } else if (err == -ENODEV) {
                                /* CPU did not call in successfully */
                                res = DR_CPU_RES_CPU_NOT_RESPONDING;
                        }

                        printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
                               dp->id, err);
                        dr_cpu_mark(resp, cpu, ncpus, res, stat);
                }
        }

        spin_lock_irqsave(&ds_lock, flags);
        __ds_send(dp->lp, resp, resp_len);
        spin_unlock_irqrestore(&ds_lock, flags);

        kfree(resp);

        /* Redistribute IRQs, taking into account the new cpus. */
        fixup_irqs();

        return 0;
}

static int dr_cpu_unconfigure(struct ds_info *dp,
                              struct ds_cap_state *cp,
                              u64 req_num,
                              cpumask_t *mask)
{
        struct ds_data *resp;
        int resp_len, ncpus, cpu;
        unsigned long flags;

        ncpus = cpumask_weight(mask);
        resp_len = dr_cpu_size_response(ncpus);
        resp = kzalloc(resp_len, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        dr_cpu_init_response(resp, req_num, cp->handle,
                             resp_len, ncpus, mask,
                             DR_CPU_STAT_UNCONFIGURED);

        for_each_cpu(cpu, mask) {
                int err;

                printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
                       dp->id, cpu);
                err = cpu_down(cpu);
                if (err)
                        dr_cpu_mark(resp, cpu, ncpus,
                                    DR_CPU_RES_FAILURE,
                                    DR_CPU_STAT_CONFIGURED);
        }

        spin_lock_irqsave(&ds_lock, flags);
        __ds_send(dp->lp, resp, resp_len);
        spin_unlock_irqrestore(&ds_lock, flags);

        kfree(resp);

        return 0;
}

static void __cpuinit dr_cpu_data(struct ds_info *dp,
                                  struct ds_cap_state *cp,
                                  void *buf, int len)
{
        struct ds_data *data = buf;
        struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
        u32 *cpu_list = (u32 *) (tag + 1);
        u64 req_num = tag->req_num;
        cpumask_t mask;
        unsigned int i;
        int err;

        switch (tag->type) {
        case DR_CPU_CONFIGURE:
        case DR_CPU_UNCONFIGURE:
        case DR_CPU_FORCE_UNCONFIGURE:
                break;

        default:
                dr_cpu_send_error(dp, cp, data);
                return;
        }

        purge_dups(cpu_list, tag->num_records);

        cpumask_clear(&mask);
        for (i = 0; i < tag->num_records; i++) {
                if (cpu_list[i] == CPU_SENTINEL)
                        continue;

                if (cpu_list[i] < nr_cpu_ids)
                        cpumask_set_cpu(cpu_list[i], &mask);
        }

        if (tag->type == DR_CPU_CONFIGURE)
                err = dr_cpu_configure(dp, cp, req_num, &mask);
        else
                err = dr_cpu_unconfigure(dp, cp, req_num, &mask);

        if (err)
                dr_cpu_send_error(dp, cp, data);
}
#endif /* CONFIG_HOTPLUG_CPU */

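/* PRI service.  This driver only logs incoming PRI messages; it does
 * not act on them.
 */
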
struct ds_pri_msg {
        __u64 req_num;
        __u64 type;
#define DS_PRI_REQUEST		0x00
#define DS_PRI_DATA		0x01
#define DS_PRI_UPDATE		0x02
};

static void ds_pri_data(struct ds_info *dp,
                        struct ds_cap_state *cp,
                        void *buf, int len)
{
        struct ds_data *dpkt = buf;
        struct ds_pri_msg *rp;

        rp = (struct ds_pri_msg *) (dpkt + 1);

        printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
               dp->id, rp->req_num, rp->type, len);
}

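/* Variable configuration (var-config) service.  ldom_set_var() sends a
 * DS_VAR_SET_REQ and busy-waits on ds_var_doorbell; ds_var_data()
 * records the result in ds_var_response and rings the doorbell.  The
 * exchange is serialized by ds_var_mutex.
 */
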
struct ds_var_hdr {
        __u32 type;
#define DS_VAR_SET_REQ		0x00
#define DS_VAR_DELETE_REQ	0x01
#define DS_VAR_SET_RESP		0x02
#define DS_VAR_DELETE_RESP	0x03
};

struct ds_var_set_msg {
        struct ds_var_hdr hdr;
        char name_and_value[0];
};

struct ds_var_delete_msg {
        struct ds_var_hdr hdr;
        char name[0];
};

struct ds_var_resp {
        struct ds_var_hdr hdr;
        __u32 result;
#define DS_VAR_SUCCESS		0x00
#define DS_VAR_NO_SPACE		0x01
#define DS_VAR_INVALID_VAR	0x02
#define DS_VAR_INVALID_VAL	0x03
#define DS_VAR_NOT_PRESENT	0x04
};

static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;

static void ds_var_data(struct ds_info *dp,
                        struct ds_cap_state *cp,
                        void *buf, int len)
{
        struct ds_data *dpkt = buf;
        struct ds_var_resp *rp;

        rp = (struct ds_var_resp *) (dpkt + 1);

        if (rp->hdr.type != DS_VAR_SET_RESP &&
            rp->hdr.type != DS_VAR_DELETE_RESP)
                return;

        ds_var_response = rp->result;
        wmb();
        ds_var_doorbell = 1;
}

void ldom_set_var(const char *var, const char *value)
{
        struct ds_cap_state *cp;
        struct ds_info *dp;
        unsigned long flags;

        spin_lock_irqsave(&ds_lock, flags);
        cp = NULL;
        for (dp = ds_info_list; dp; dp = dp->next) {
                struct ds_cap_state *tmp;

                tmp = find_cap_by_string(dp, "var-config");
                if (tmp && tmp->state == CAP_STATE_REGISTERED) {
                        cp = tmp;
                        break;
                }
        }
        if (!cp) {
                for (dp = ds_info_list; dp; dp = dp->next) {
                        struct ds_cap_state *tmp;

                        tmp = find_cap_by_string(dp, "var-config-backup");
                        if (tmp && tmp->state == CAP_STATE_REGISTERED) {
                                cp = tmp;
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&ds_lock, flags);

        if (cp) {
                union {
                        struct {
                                struct ds_data data;
                                struct ds_var_set_msg msg;
                        } header;
                        char all[512];
                } pkt;
                char *base, *p;
                int msg_len, loops;

                memset(&pkt, 0, sizeof(pkt));
                pkt.header.data.tag.type = DS_DATA;
                pkt.header.data.handle = cp->handle;
                pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
                base = p = &pkt.header.msg.name_and_value[0];
                strcpy(p, var);
                p += strlen(var) + 1;
                strcpy(p, value);
                p += strlen(value) + 1;

                msg_len = (sizeof(struct ds_data) +
                           sizeof(struct ds_var_set_msg) +
                           (p - base));
                msg_len = (msg_len + 3) & ~3;
                pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

                mutex_lock(&ds_var_mutex);

                spin_lock_irqsave(&ds_lock, flags);
                ds_var_doorbell = 0;
                ds_var_response = -1;

                __ds_send(dp->lp, &pkt, msg_len);
                spin_unlock_irqrestore(&ds_lock, flags);

                loops = 1000;
                while (ds_var_doorbell == 0) {
                        if (loops-- < 0)
                                break;
                        barrier();
                        udelay(100);
                }

                mutex_unlock(&ds_var_mutex);

                if (ds_var_doorbell == 0 ||
                    ds_var_response != DS_VAR_SUCCESS)
                        printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
                               "failed, response(%d).\n",
                               dp->id, var, value, ds_var_response);
        } else {
                printk(KERN_ERR PFX "var-config not registered so "
                       "could not set (%s) variable to (%s).\n",
                       var, value);
        }
}

static char full_boot_str[256] __attribute__((aligned(32)));
static int reboot_data_supported;

void ldom_reboot(const char *boot_command)
{
        /* Don't bother with any of this if the boot_command
         * is empty.
         */
        if (boot_command && strlen(boot_command)) {
                unsigned long len;

                strcpy(full_boot_str, "boot ");
                strcpy(full_boot_str + strlen("boot "), boot_command);
                len = strlen(full_boot_str);

                if (reboot_data_supported) {
                        unsigned long ra = kimage_addr_to_ra(full_boot_str);
                        unsigned long hv_ret;

                        hv_ret = sun4v_reboot_data_set(ra, len);
                        if (hv_ret != HV_EOK)
                                pr_err("SUN4V: Unable to set reboot data "
                                       "hv_ret=%lu\n", hv_ret);
                } else {
                        ldom_set_var("reboot-command", full_boot_str);
                }
        }
        sun4v_mach_sir();
}

void ldom_power_off(void)
{
        sun4v_mach_exit(0);
}

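/* Everything below is connection management for the LDC channel that
 * carries the DS protocol: service registration, the handshake state
 * machine, and the receive path.
 */
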
static void ds_conn_reset(struct ds_info *dp)
{
        printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
               dp->id, __builtin_return_address(0));
}

static int register_services(struct ds_info *dp)
{
        struct ldc_channel *lp = dp->lp;
        int i;

        for (i = 0; i < dp->num_ds_states; i++) {
                struct {
                        struct ds_reg_req req;
                        u8 id_buf[256];
                } pbuf;
                struct ds_cap_state *cp = &dp->ds_states[i];
                int err, msg_len;
                u64 new_count;

                if (cp->state == CAP_STATE_REGISTERED)
                        continue;

                new_count = sched_clock() & 0xffffffff;
                cp->handle = ((u64) i << 32) | new_count;

                msg_len = (sizeof(struct ds_reg_req) +
                           strlen(cp->service_id));

                memset(&pbuf, 0, sizeof(pbuf));
                pbuf.req.tag.type = DS_REG_REQ;
                pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
                pbuf.req.handle = cp->handle;
                pbuf.req.major = 1;
                pbuf.req.minor = 0;
                strcpy(pbuf.req.svc_id, cp->service_id);

                err = __ds_send(lp, &pbuf, msg_len);
                if (err > 0)
                        cp->state = CAP_STATE_REG_SENT;
        }
        return 0;
}

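/* Handshake state machine: ds_up() sends DS_INIT_REQ and sets
 * DS_HS_START; a DS_INIT_ACK moves us to DS_HS_DONE and triggers
 * register_services().  After that, DS_REG_ACK / DS_REG_NACK messages
 * update the per-capability state.  Anything out of sequence resets
 * the connection.
 */
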
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
        if (dp->hs_state == DS_HS_START) {
                if (pkt->type != DS_INIT_ACK)
                        goto conn_reset;

                dp->hs_state = DS_HS_DONE;

                return register_services(dp);
        }

        if (dp->hs_state != DS_HS_DONE)
                goto conn_reset;

        if (pkt->type == DS_REG_ACK) {
                struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
                struct ds_cap_state *cp = find_cap(dp, ap->handle);

                if (!cp) {
                        printk(KERN_ERR "ds-%llu: REG ACK for unknown "
                               "handle %llx\n", dp->id, ap->handle);
                        return 0;
                }
                printk(KERN_INFO "ds-%llu: Registered %s service.\n",
                       dp->id, cp->service_id);
                cp->state = CAP_STATE_REGISTERED;
        } else if (pkt->type == DS_REG_NACK) {
                struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
                struct ds_cap_state *cp = find_cap(dp, np->handle);

                if (!cp) {
                        printk(KERN_ERR "ds-%llu: REG NACK for "
                               "unknown handle %llx\n",
                               dp->id, np->handle);
                        return 0;
                }
                cp->state = CAP_STATE_UNKNOWN;
        }

        return 0;

conn_reset:
        ds_conn_reset(dp);
        return -ECONNRESET;
}

static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
        struct ds_data_nack nack = {
                .tag = {
                        .type = DS_NACK,
                        .len = (sizeof(struct ds_data_nack) -
                                sizeof(struct ds_msg_tag)),
                },
                .handle = handle,
                .result = DS_INV_HDL,
        };

        __ds_send(dp->lp, &nack, sizeof(nack));
}

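/* Data packets are not handled in the LDC event callback itself: that
 * runs with ds_lock held and interrupts off, while service handlers
 * such as domain_shutdown_data() and dr_cpu_configure() can sleep.
 * ds_data() therefore copies each packet onto ds_work_list and the
 * "kldomd" kthread processes the queue.
 */
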
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);

struct ds_queue_entry {
        struct list_head list;
        struct ds_info *dp;
        int req_len;
        int __pad;
        u64 req[0];
};

static void process_ds_work(void)
{
        struct ds_queue_entry *qp, *tmp;
        unsigned long flags;
        LIST_HEAD(todo);

        spin_lock_irqsave(&ds_lock, flags);
        list_splice_init(&ds_work_list, &todo);
        spin_unlock_irqrestore(&ds_lock, flags);

        list_for_each_entry_safe(qp, tmp, &todo, list) {
                struct ds_data *dpkt = (struct ds_data *) qp->req;
                struct ds_info *dp = qp->dp;
                struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
                int req_len = qp->req_len;

                if (!cp) {
                        printk(KERN_ERR "ds-%llu: Data for unknown "
                               "handle %llu\n",
                               dp->id, dpkt->handle);

                        spin_lock_irqsave(&ds_lock, flags);
                        __send_ds_nack(dp, dpkt->handle);
                        spin_unlock_irqrestore(&ds_lock, flags);
                } else {
                        cp->data(dp, cp, dpkt, req_len);
                }

                list_del(&qp->list);
                kfree(qp);
        }
}

static int ds_thread(void *__unused)
{
        DEFINE_WAIT(wait);

        while (1) {
                prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
                if (list_empty(&ds_work_list))
                        schedule();
                finish_wait(&ds_wait, &wait);

                if (kthread_should_stop())
                        break;

                process_ds_work();
        }

        return 0;
}

static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
        struct ds_data *dpkt = (struct ds_data *) pkt;
        struct ds_queue_entry *qp;

        qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
        if (!qp) {
                __send_ds_nack(dp, dpkt->handle);
        } else {
                qp->dp = dp;
                qp->req_len = len;
                memcpy(&qp->req, pkt, len);
                list_add_tail(&qp->list, &ds_work_list);
                wake_up(&ds_wait);
        }
        return 0;
}

static void ds_up(struct ds_info *dp)
{
        struct ldc_channel *lp = dp->lp;
        struct ds_ver_req req;
        int err;

        req.tag.type = DS_INIT_REQ;
        req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
        req.ver.major = 1;
        req.ver.minor = 0;

        err = __ds_send(lp, &req, sizeof(req));
        if (err > 0)
                dp->hs_state = DS_HS_START;
}

static void ds_reset(struct ds_info *dp)
{
        int i;

        dp->hs_state = 0;

        for (i = 0; i < dp->num_ds_states; i++) {
                struct ds_cap_state *cp = &dp->ds_states[i];

                cp->state = CAP_STATE_UNKNOWN;
        }
}

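/* LDC event callback.  LDC_EVENT_UP starts the handshake via ds_up(),
 * LDC_EVENT_RESET clears all capability state via ds_reset(), and
 * LDC_EVENT_DATA_READY drains the channel: read a ds_msg_tag, then
 * tag->len bytes of payload, and dispatch on tag->type (handshake
 * messages are those below DS_DATA).
 */
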
static void ds_event(void *arg, int event)
{
        struct ds_info *dp = arg;
        struct ldc_channel *lp = dp->lp;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&ds_lock, flags);

        if (event == LDC_EVENT_UP) {
                ds_up(dp);
                spin_unlock_irqrestore(&ds_lock, flags);
                return;
        }

        if (event == LDC_EVENT_RESET) {
                ds_reset(dp);
                spin_unlock_irqrestore(&ds_lock, flags);
                return;
        }

        if (event != LDC_EVENT_DATA_READY) {
                printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
                       dp->id, event);
                spin_unlock_irqrestore(&ds_lock, flags);
                return;
        }

        while (1) {
                struct ds_msg_tag *tag;

                err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

                if (unlikely(err < 0)) {
                        if (err == -ECONNRESET)
                                ds_conn_reset(dp);
                        break;
                }
                if (err == 0)
                        break;

                tag = dp->rcv_buf;
                err = ldc_read(lp, tag + 1, tag->len);

                if (unlikely(err < 0)) {
                        if (err == -ECONNRESET)
                                ds_conn_reset(dp);
                        break;
                }
                if (err < tag->len)
                        break;

                if (tag->type < DS_DATA)
                        err = ds_handshake(dp, dp->rcv_buf);
                else
                        err = ds_data(dp, dp->rcv_buf,
                                      sizeof(*tag) + err);
                if (err == -ECONNRESET)
                        break;
        }

        spin_unlock_irqrestore(&ds_lock, flags);
}

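/* One ds_info is created per "domain-services-port" virtual device;
 * each is prepended to ds_info_list, which ldom_set_var() walks when
 * looking for a registered var-config service.
 */
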
static int __devinit ds_probe(struct vio_dev *vdev,
                              const struct vio_device_id *id)
{
        static int ds_version_printed;
        struct ldc_channel_config ds_cfg = {
                .event		= ds_event,
                .mtu		= 4096,
                .mode		= LDC_MODE_STREAM,
        };
        struct mdesc_handle *hp;
        struct ldc_channel *lp;
        struct ds_info *dp;
        const u64 *val;
        int err, i;

        if (ds_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (!dp)
                goto out_err;

        hp = mdesc_grab();
        val = mdesc_get_property(hp, vdev->mp, "id", NULL);
        if (val)
                dp->id = *val;
        mdesc_release(hp);

        dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
        if (!dp->rcv_buf)
                goto out_free_dp;

        dp->rcv_buf_len = 4096;

        dp->ds_states = kzalloc(sizeof(ds_states_template), GFP_KERNEL);
        if (!dp->ds_states)
                goto out_free_rcv_buf;

        memcpy(dp->ds_states, ds_states_template,
               sizeof(ds_states_template));
        dp->num_ds_states = ARRAY_SIZE(ds_states_template);

        for (i = 0; i < dp->num_ds_states; i++)
                dp->ds_states[i].handle = ((u64)i << 32);

        ds_cfg.tx_irq = vdev->tx_irq;
        ds_cfg.rx_irq = vdev->rx_irq;

        lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out_free_ds_states;
        }
        dp->lp = lp;

        err = ldc_bind(lp, "DS");
        if (err)
                goto out_free_ldc;

        spin_lock_irq(&ds_lock);
        dp->next = ds_info_list;
        ds_info_list = dp;
        spin_unlock_irq(&ds_lock);

        return err;

out_free_ldc:
        ldc_free(dp->lp);

out_free_ds_states:
        kfree(dp->ds_states);

out_free_rcv_buf:
        kfree(dp->rcv_buf);

out_free_dp:
        kfree(dp);

out_err:
        return err;
}

static int ds_remove(struct vio_dev *vdev)
{
        return 0;
}

static const struct vio_device_id ds_match[] = {
        {
                .type = "domain-services-port",
        },
        {},
};

static struct vio_driver ds_driver = {
        .id_table	= ds_match,
        .probe		= ds_probe,
        .remove		= ds_remove,
        .driver		= {
                .name	= "ds",
                .owner	= THIS_MODULE,
        }
};

static int __init ds_init(void)
{
        unsigned long hv_ret, major, minor;

        hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
        if (hv_ret == HV_EOK) {
                pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
                        major, minor);
                reboot_data_supported = 1;
        }

        kthread_run(ds_thread, NULL, "kldomd");

        return vio_register_driver(&ds_driver);
}

subsys_initcall(ds_init);