// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/*
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!
 *
 *	Each blade has one "kernel context" that owns GRU kernel resources
 *	located on the blade. Kernel drivers use GRU resources in this context
 *	for sending messages, zeroing memory, etc.
 *
 *	The kernel context is dynamically loaded on demand. If it is not in
 *	use by the kernel, the kernel context can be unloaded & given to a user.
 *	The kernel context will be reloaded when needed. This may require that
 *	a context be stolen from a user.
 *		NOTE: frequent unloading/reloading of the kernel context is
 *		      expensive. We are depending on batch schedulers, cpusets,
 *		      sane drivers or some other mechanism to prevent the need
 *		      for frequent stealing/reloading.
 *
 *	The kernel context consists of two parts:
 *		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *		  Each cpu has its own private resources & does not share them
 *		  with other cpus. These resources are used serially, i.e.,
 *		  locked, used & unlocked on each call to a function in
 *		  grukservices.
 *			(Now that we have dynamic loading of kernel contexts, I
 *			 may rethink this & allow sharing between cpus....)
 *
 *		- Additional resources can be reserved long term & used directly
 *		  by UV drivers located in the kernel. Drivers using these GRU
 *		  resources can use asynchronous GRU instructions that send
 *		  interrupts on completion.
 *			- these resources must be explicitly locked/unlocked
 *			- locked resources prevent (obviously) the kernel
 *			  context from being unloaded.
 *			- drivers using these resources directly issue their own
 *			  GRU instruction and must wait/check completion.
 *
 *		  When these resources are reserved, the caller can optionally
 *		  associate a wait_queue with the resources and use asynchronous
 *		  GRU instructions. When an async GRU instruction completes, the
 *		  driver will do a wakeup on the event.
 */
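/*
 * Hypothetical usage sketch of the long-term (async) resources, based only
 * on the functions defined later in this file; it is not taken from any real
 * driver, and blade_id/ncbrs/nbytes are placeholder values:
 *
 *	static DECLARE_COMPLETION(cmp);
 *	unsigned long han;
 *	void *cb, *dsr;
 *
 *	han = gru_reserve_async_resources(blade_id, ncbrs, nbytes, &cmp);
 *	...
 *	gru_lock_async_resource(han, &cb, &dsr);
 *	... issue GRU instructions on cb/dsr (e.g. with IMA_INTERRUPT) ...
 *	gru_wait_async_cbr(han);
 *	gru_unlock_async_resource(han);
 *	...
 *	gru_release_async_resources(han);
 *
 * A handle is simply the blade id biased by 1 (see ASYNC_BID_TO_HAN below),
 * so that 0 can be returned when the blade's async resources are already
 * reserved.
 */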
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__                               \
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3
/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};
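/*
 * As set up by gru_create_message_queue() below, the data area is split
 * into two halves: "start"/"start2" mark the beginning of each half,
 * "limit" marks the end of the usable area, and hstatus[] records, per
 * half, whether the receiver has drained that half so the sender may
 * switch the queue head to it (see send_message_queue_full() and
 * gru_free_message()).
 */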
/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts) {
		do {
			bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
			if (!IS_ERR(bs->bs_kgts))
				break;
			msleep(1);
		} while (true);
		bs->bs_kgts->ts_user_blade_id = blade_id;
	}
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts)) {
			msleep(1);
			gru_steal_context(kgts);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}
/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all freed, else the number of contexts still in use.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;

		/* Ignore busy contexts. Don't want to block here. */
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
			kfree(kgts);
		} else {
			ret++;
		}
	}
	return ret;
}
/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;
	int bid;

	STAT(lock_kernel_context);
again:
	bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
	bs = gru_base[bid];

	/* Handle the case where migration occurred while waiting for the sema */
	down_read(&bs->bs_kgts_sema);
	if (blade_id < 0 && bid != uv_numa_blade_id()) {
		up_read(&bs->bs_kgts_sema);
		goto again;
	}
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, bid);
	return bs;
}
/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}
/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 * 	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(-1);
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}
/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 * 	input:
 * 		blade_id  - blade on which resources should be reserved
 * 		cbrs	  - number of CBRs
 * 		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}
/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}
/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
}
/*
 * Lock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}
/*
 * Unlock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *kgts = NULL;
	unsigned long off;
	int cbrnum, bid;

	/*
	 * Locate kgts for cb. This algorithm is SLOW but
	 * this function is rarely called (i.e., almost never).
	 * Performance does not matter.
	 */
	for_each_possible_blade(bid) {
		if (!gru_base[bid])
			continue;
		kgts = gru_base[bid]->bs_kgts;
		if (!kgts || !kgts->ts_gru)
			continue;
		off = cb - kgts->ts_gru->gs_gru_base_vaddr;
		if (off < GRU_SIZE)
			break;
		kgts = NULL;
	}
	BUG_ON(!kgts);
	cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
	cbe = get_cbe(GRUBASE(cb), cbrnum);
	gru_flush_cache(cbe);	/* CBE not coherent */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	gru_flush_cache(cbe);
	return 0;
}
static char *gru_get_cb_exception_detail_str(int ret, void *cb,
					     char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}
static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		if (gru_get_cb_message_queue_substatus(cb))
			return CBS_EXCEPTION;
		gru_get_cb_exception_detail(cb, &excdet);
		if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
				(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}
int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}
int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}
static void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}
/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
/*
 * Create a message queue.
 * 	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = nasid >> 1;
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
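/*
 * Illustrative send/receive flow using the mesq interface below; this is a
 * hypothetical sketch that mirrors quicktest1(), not code from a real
 * driver. "p" is assumed to be a cacheline-aligned buffer that does not
 * cross a page boundary:
 *
 *	struct gru_message_queue_desc mqd;
 *	char mesg[GRU_CACHE_LINE_BYTES];
 *	void *m;
 *
 *	gru_create_message_queue(&mqd, p, bytes, nasid, vector, apicid);
 *	...
 *	ret = gru_send_message_gpa(&mqd, mesg, sizeof(mesg));
 *	...
 *	m = gru_get_next_message(&mqd);
 *	if (m)
 *		gru_free_message(&mqd, m);
 */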
/*
 * Send a NOOP message to a message queue
 * 	Returns:
 * 		 0 - if queue is full after the send. This is the normal case
 * 		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = 0;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
			STAT(mesq_noop_page_overflow);
			fallthrough;
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If not successful in swapping queue head, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
							IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have successfully been written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
			void *mesg, int lines)
{
	unsigned long m;
	int ret, loops = 200;	/* experimentally determined */

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;

	if (!mqd->interrupt_vector)
		return MQE_OK;

	/*
	 * Send a noop message in order to deliver a cross-partition interrupt
	 * to the SSI that contains the target message queue. Normally, the
	 * interrupt is automatically delivered by hardware following mesq
	 * operations, but some error conditions require explicit delivery.
	 * The noop message will trigger delivery. Otherwise partition failures
	 * could cause unrecovered errors.
	 */
	do {
		ret = send_noop_message(cb, mqd, mesg);
	} while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));

	if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
		/*
		 * Don't indicate to the app to resend the message, as it's
		 * already been successfully sent. We simply send an OK
		 * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
		 * assuming that the other side is receiving enough
		 * interrupts to get this message processed anyway.
		 */
		ret = MQE_OK;
	}
	return ret;
}
/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	case CBSS_PAGE_OVERFLOW:
		STAT(mesq_page_overflow);
		fallthrough;
	default:
		BUG();
	}
	return ret;
}
/*
 * Send a message to a message queue
 * 	mqd	message queue descriptor
 * 	mesg	message. Must be a vaddr within a GSEG
 * 	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	STAT(mesq_receive);
	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
 */
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
	void *cb;
	void *dsr;
	int ret, iaa;

	STAT(read_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	iaa = gpa >> 62;
	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
	ret = gru_wait(cb);
	if (ret == CBS_IDLE)
		*value = *(unsigned long *)dsr;
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);
/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
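/*
 * Typical use (see quicktest3() below): convert kernel virtual addresses to
 * global physical addresses with uv_gpa() and let the GRU do the copy,
 * e.g. gru_copy_gpa(uv_gpa(dst), uv_gpa(src), nbytes).
 */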
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       smp_processor_id(), word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))

static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need a 1K cacheline-aligned buffer that does not cross a page boundary */
	p = kmalloc(4096, GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
		       smp_processor_id(), ret, i);
		goto done;
	}

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	if (i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: bad message, i %d, m %p, m8 %d\n",
			smp_processor_id(), i, m, m ? m[8] : -1);
		goto done;
	}
	ret = 0;

done:
	kfree(p);
	return ret;
}
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	struct gru_control_block_status *gen;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
				XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	k = numcb;
	do {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
				break;
		}
		if (i == numcb)
			continue;
		if (istatus != CBS_IDLE) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
			ret = -EFAULT;
		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
				buf[4 * i + 3]) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
			       smp_processor_id(), i, buf[4 * i], buf[4 * i + 1],
			       buf[4 * i + 2], buf[4 * i + 3]);
			ret = -EIO;
		}
		k--;
		gen = cb;
		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
	} while (k);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);

done:
	kfree(buf);
	return ret;
}
#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
	char buf1[BUFSIZE], buf2[BUFSIZE];
	int ret = 0;

	memset(buf2, 0, sizeof(buf2));
	memset(buf1, get_cycles() & 255, sizeof(buf1));
	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
	if (memcmp(buf1, buf2, BUFSIZE)) {
		printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
		ret = -EIO;
	}
	return ret;
}
/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	case 3:
		ret = quicktest3(arg);
		break;
	case 99:
		ret = gru_free_kernel_contexts();
		break;
	}
	return ret;
}

int gru_kservices_init(void)
{
	return 0;
}

void gru_kservices_exit(void)
{
	if (gru_free_kernel_contexts())
		BUG();
}