/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"
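
/*
 * Two GRU message queues are used per side: the activate mq carries
 * partition activation and channel open/close control traffic, while the
 * notify mq carries the actual channel payload messages.  Each activate
 * message slot is one GRU cache line and each notify message slot two,
 * with four slots per possible partition in each queue.
 */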
static int xpc_mq_node = -1;

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}
static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
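
/*
 * Tell the hardware where to deliver the interrupt for a GRU message
 * queue: on x86_64 an IRQ/vector is allocated via uv_setup_irq(), on ia64
 * one of the fixed SGI_XPC_ACTIVATE/SGI_XPC_NOTIFY vectors is programmed
 * into the watchlist MMR directly.
 */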
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0)
		return mq->irq;

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}
static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}
static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}
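
/*
 * Create a GRU message queue on the node containing the specified cpu:
 * allocate the queue pages locally, register a watchlist entry so the GRU
 * raises an interrupt on message arrival, hook up the IRQ handler, build
 * the GRU queue descriptor and finally open the memory protections so
 * that other partitions may write into the queue.
 */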
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
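
/* Undo, in reverse order, everything xpc_create_gru_mq_uv() set up. */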
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
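
/*
 * Send a message to the specified GRU message queue, retrying as long as
 * the queue is full (MQE_QUEUE_FULL) or the local block is congested
 * (MQE_CONGESTION); any other failure is returned as xpGruSendMqError.
 */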
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
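
/*
 * Act upon a single message pulled off the activate mq.  Activate and
 * deactivate requests are simply recorded and handed to the heartbeat
 * checker thread, while channel control messages set the corresponding
 * chctl flags and wake the channel manager.
 */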
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
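
/*
 * Interrupt handler for the activate mq: drain all queued messages,
 * dispatch each to xpc_handle_activate_mq_msg_uv() and, if any of them
 * requested a partition state change, wake the heartbeat checker.
 */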
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
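
/*
 * Send a message to the remote partition's activate mq.  The remote
 * queue's GRU descriptor is fetched and cached on first use; if a send
 * fails and the cached descriptor has since been invalidated, the
 * descriptor is re-fetched and the send retried.
 */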
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}
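
/*
 * Wrappers around xpc_send_activate_IRQ_uv(): a failed send deactivates
 * the target partition.  The channel variant temporarily drops ch->lock
 * (when held) around that deactivation.
 */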
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}
static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}
static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {

		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}
static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}
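
/*
 * A simple spinlock-protected singly-linked FIFO, used for the per-channel
 * free send-slot list and the list of deliverable received messages.
 */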
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}
static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}
static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}
static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}
/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}
/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
1151 static enum xp_retval
1152 xpc_setup_msg_structures_uv(struct xpc_channel
*ch
)
1154 static enum xp_retval ret
;
1155 struct xpc_channel_uv
*ch_uv
= &ch
->sn
.uv
;
1157 DBUG_ON(ch
->flags
& XPC_C_SETUP
);
1159 ch_uv
->cached_notify_gru_mq_desc
= kmalloc(sizeof(struct
1160 gru_message_queue_desc
),
1162 if (ch_uv
->cached_notify_gru_mq_desc
== NULL
)
1165 ret
= xpc_allocate_send_msg_slot_uv(ch
);
1166 if (ret
== xpSuccess
) {
1168 ret
= xpc_allocate_recv_msg_slot_uv(ch
);
1169 if (ret
!= xpSuccess
) {
1170 kfree(ch_uv
->send_msg_slots
);
1171 xpc_init_fifo_uv(&ch_uv
->msg_slot_free_list
);
/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	lockdep_assert_held(&ch->lock);

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}
static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}
static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}
static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}
static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}
static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}
static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}
static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}
static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}
static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}
static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}
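
/*
 * A header-only message (hdr.size == 0) is an ACK from the far side: the
 * corresponding send slot has been consumed, so bump its slot number by
 * local_nentries for the next use, notify the sender and free the slot.
 */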
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}
static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}
static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
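
/*
 * Send a payload on the specified channel: reserve a local send slot
 * (used later to match the ACK and run the notify callout), stage the
 * header plus payload in an on-stack buffer and push it to the remote
 * partition's notify mq.
 */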
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
		 * error returned here will confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}
/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
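
/*
 * Create the activate and notify message queues on the given node, trying
 * each of the node's CPUs in turn until creation succeeds.
 */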
static int
xpc_init_mq_node(int nid)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_activate_mq_uv =
			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
					     XPC_ACTIVATE_IRQ_NAME,
					     xpc_handle_activate_IRQ_uv);
		if (!IS_ERR(xpc_activate_mq_uv))
			break;
	}
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_notify_mq_uv =
			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
					     XPC_NOTIFY_IRQ_NAME,
					     xpc_handle_notify_IRQ_uv);
		if (!IS_ERR(xpc_notify_mq_uv))
			break;
	}
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}
int
xpc_init_uv(void)
{
	int nid;
	int ret = 0;

	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	if (xpc_mq_node < 0)
		for_each_online_node(nid) {
			ret = xpc_init_mq_node(nid);

			if (!ret)
				break;
		}
	else
		ret = xpc_init_mq_node(xpc_mq_node);

	if (ret < 0)
		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
			-ret);

	return ret;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");