/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

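/*
 * Each message queue is sized to hold four messages of the respective size
 * from every partition we could possibly be communicating with
 * (XP_MAX_NPARTITIONS_UV), so several messages per partition can be
 * outstanding at once before the sender has to retry.
 */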
#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

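/*
 * xpc_activate_mq_uv carries partition activation/deactivation requests and
 * channel control messages; xpc_notify_mq_uv carries the channels' payload
 * messages and their ACKs.  Both queues are created during initialization
 * and destroyed again on exit (see the end of this file).
 */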
static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

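/*
 * Hook the message queue up to an interrupt.  On x86_64 the irq is allocated
 * through uv_setup_irq() and the resulting MMR value is read back; on ia64 a
 * fixed SGI vector is used and the MMR value (target cpu and vector) is
 * written directly.
 */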
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

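/*
 * Create one of the GRU message queues: allocate the queue descriptor and
 * backing pages, register the queue with the GRU watchlist, wire up the
 * interrupt, have the GRU create the message queue itself, and finally open
 * the memory protections so other partitions can write to it.  Any failure
 * unwinds the steps already completed.
 */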
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	enum xp_retval ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

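/*
 * Send one message via a GRU message queue.  The send is retried while the
 * destination queue is full (MQE_QUEUE_FULL, after a short sleep) or the
 * local block is congested (MQE_CONGESTION); any other GRU error is mapped
 * to xpGruSendMqError.
 */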
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

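/*
 * Demultiplex one message received on the activate message queue.
 * Partition-level requests are recorded in act_state_req for the heartbeat
 * checker to act on, while channel control messages just set the
 * corresponding chctl flag and wake the channel manager.
 */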
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

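/*
 * Interrupt handler for the activate message queue: drain every queued
 * message, hand each one to xpc_handle_activate_mq_msg_uv(), and wake the
 * heartbeat checker if any of them require it.
 */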
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

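/*
 * The remote partition's activate-queue descriptor is cached locally the
 * first time a message is sent to it, and re-fetched whenever the cached
 * copy has been invalidated, so the xp_remote_memcpy() of the descriptor
 * is not paid on every send.
 */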
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}

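/*
 * A small spinlock-protected singly-linked FIFO, used for each channel's
 * free send-msg-slot list and its list of received-but-not-yet-delivered
 * messages.
 */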
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

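/*
 * The two allocation routines below retry with progressively fewer entries
 * if kzalloc() fails, shrinking the channel's local/remote nentries to
 * whatever could actually be allocated.
 */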
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

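/*
 * A notify-mq message whose header size is zero is an ACK from the receiver.
 * The ACK releases the matching send msg slot; the slot's number is advanced
 * by ch->local_nentries before the slot is returned to the free list.
 */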
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
		 * error returned here will confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

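/*
 * The uv-specific implementations above are published to the generic XPC
 * code through this operations table, which is installed into xpc_arch_ops
 * during initialization.
 */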
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}