/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SN Platform GRU Driver
 *
 *            GRU DRIVER TABLES, MACROS, externs, etc
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#ifndef __GRUTABLES_H__
#define __GRUTABLES_H__

/*
 * The GRU is a user addressable memory accelerator. It provides
 * several forms of load, store, memset, bcopy instructions. In addition, it
 * contains special instructions for AMOs, sending messages to message
 * queues, etc.
 *
 * The GRU is an integral part of the node controller. It connects
 * directly to the cpu socket. In its current implementation, there are 2
 * GRU chiplets in the node controller on each blade (~node).
 *
 * The entire GRU memory space is fully coherent and cacheable by the cpus.
 *
 * Each GRU chiplet has a physical memory map that looks like the following:
 *
 *	+-----------------+
 *	|/////////////////|
 *	|/////////////////|
 *	|/////////////////|
 *	|/////////////////|
 *	|/////////////////|
 *	|/////////////////|
 *	|/////////////////|
 *	|/////////////////|
 *	+-----------------+
 *	|  system control |
 *	+-----------------+        _______ +-------------+
 *	|/////////////////|       /        |             |
 *	|/////////////////|      /         |             |
 *	|/////////////////|     /          | instructions|
 *	|/////////////////|    /           |             |
 *	|/////////////////|   /            |             |
 *	|/////////////////|  /             |-------------|
 *	|/////////////////| /              |             |
 *	+-----------------+                |             |
 *	|  context 15     |                |  data       |
 *	+-----------------+                |             |
 *	|    ......       | \              |             |
 *	+-----------------+  \____________ +-------------+
 *	|  context 1      |
 *	+-----------------+
 *	|  context 0      |
 *	+-----------------+
 *
 * Each of the "contexts" is a chunk of memory that can be mmaped into user
 * space. The context consists of 2 parts:
 *
 *	- an instruction space that can be directly accessed by the user
 *	  to issue GRU instructions and to check instruction status.
 *
 *	- a data area that acts as normal RAM.
 *
 * User instructions contain virtual addresses of data to be accessed by the
 * GRU. The GRU contains a TLB that is used to convert these user virtual
 * addresses to physical addresses.
 *
 * The "system control" area of the GRU chiplet is used by the kernel driver
 * to manage user contexts and to perform functions such as TLB dropin and
 * purging.
 *
 * One context may be reserved for the kernel and used for cross-partition
 * communication. The GRU will also be used to asynchronously zero out
 * large blocks of memory (not currently implemented).
 */

/*
 * GRU data structures:
 *
 * VDATA-VMA Data	- Holds a few parameters. Head of linked list of
 *			  GTS tables for threads using the GSEG
 * GTS - Gru Thread State - contains info for managing a GSEG context. A
 *			  GTS is allocated for each thread accessing a
 *			  GSEG.
 * GTD - GRU Thread Data - contains shadow copy of GRU data when GSEG is
 *			  not loaded into a GRU
 * GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs
 *			  where a GSEG has been loaded. Similar to
 *			  an mm_struct but for GRU.
 *
 * GS - GRU State	- Used to manage the state of a GRU chiplet
 * BS - Blade State	- Used to manage state of all GRU chiplets
 *			  on a blade
 */

/*
 * Normal task tables for task using GRU.
 *		- 2 threads in process
 *		- 2 GSEGs open in process
 *		- GSEG1 is being used by both threads
 *		- GSEG2 is used only by thread 2
 *
 *       task ---+---> mm ->------ (notifier) -------+-> gms
 *                     |                             |
 *                     |--> vma -> vdata ---> gts--->|	GSEG1 (thread1)
 *                     |                  |          |
 *                     |                  +-> gts--->|	GSEG1 (thread2)
 *                     |                             |
 *                     |--> vma -> vdata ---> gts--->|	GSEG2 (thread2)
 *
 * GSEGs are marked DONTCOPY on fork
 *
 * At open
 *	file.private_data -> NULL
 *
 * At mmap,
 *	vma -> vdata
 *
 * After gseg reference
 *	vma -> vdata -> gts
 *
 * After fork
 *   parent
 *	vma -> vdata -> gts
 *   child
 *	(vma is not copied)
 */

#include <linux/rmap.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>
#include "gru.h"
#include "grulib.h"
#include "gruhandles.h"

extern struct gru_stats_s gru_stats;
extern struct gru_blade_state *gru_base[];
extern unsigned long gru_start_paddr, gru_end_paddr;
extern void *gru_start_vaddr;
extern unsigned int gru_max_gids;

#define GRU_MAX_BLADES		MAX_NUMNODES
#define GRU_MAX_GRUS		(GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)

#define GRU_DRIVER_ID_STR	"SGI GRU Device Driver"
#define GRU_DRIVER_VERSION_STR	"0.85"

/*
 * GRU statistics
 */
struct gru_stats_s {
	atomic_long_t vdata_alloc;
	atomic_long_t vdata_free;
	atomic_long_t gts_alloc;
	atomic_long_t gts_free;
	atomic_long_t gms_alloc;
	atomic_long_t gms_free;
	atomic_long_t gts_double_allocate;
	atomic_long_t assign_context;
	atomic_long_t assign_context_failed;
	atomic_long_t free_context;
	atomic_long_t load_user_context;
	atomic_long_t load_kernel_context;
	atomic_long_t lock_kernel_context;
	atomic_long_t unlock_kernel_context;
	atomic_long_t steal_user_context;
	atomic_long_t steal_kernel_context;
	atomic_long_t steal_context_failed;

	atomic_long_t asid_new;
	atomic_long_t asid_next;
	atomic_long_t asid_wrap;
	atomic_long_t asid_reuse;

	atomic_long_t intr_cbr;
	atomic_long_t intr_tfh;
	atomic_long_t intr_spurious;
	atomic_long_t intr_mm_lock_failed;
	atomic_long_t call_os;
	atomic_long_t call_os_wait_queue;
	atomic_long_t user_flush_tlb;
	atomic_long_t user_unload_context;
	atomic_long_t user_exception;
	atomic_long_t set_context_option;
	atomic_long_t check_context_retarget_intr;
	atomic_long_t check_context_unload;
	atomic_long_t tlb_dropin;
	atomic_long_t tlb_preload_page;
	atomic_long_t tlb_dropin_fail_no_asid;
	atomic_long_t tlb_dropin_fail_upm;
	atomic_long_t tlb_dropin_fail_invalid;
	atomic_long_t tlb_dropin_fail_range_active;
	atomic_long_t tlb_dropin_fail_idle;
	atomic_long_t tlb_dropin_fail_fmm;
	atomic_long_t tlb_dropin_fail_no_exception;
	atomic_long_t tfh_stale_on_fault;
	atomic_long_t mmu_invalidate_range;
	atomic_long_t mmu_invalidate_page;
	atomic_long_t flush_tlb;
	atomic_long_t flush_tlb_gru;
	atomic_long_t flush_tlb_gru_tgh;
	atomic_long_t flush_tlb_gru_zero_asid;

	atomic_long_t copy_gpa;
	atomic_long_t read_gpa;

	atomic_long_t mesq_receive;
	atomic_long_t mesq_receive_none;
	atomic_long_t mesq_send;
	atomic_long_t mesq_send_failed;
	atomic_long_t mesq_noop;
	atomic_long_t mesq_send_unexpected_error;
	atomic_long_t mesq_send_lb_overflow;
	atomic_long_t mesq_send_qlimit_reached;
	atomic_long_t mesq_send_amo_nacked;
	atomic_long_t mesq_send_put_nacked;
	atomic_long_t mesq_page_overflow;
	atomic_long_t mesq_qf_locked;
	atomic_long_t mesq_qf_noop_not_full;
	atomic_long_t mesq_qf_switch_head_failed;
	atomic_long_t mesq_qf_unexpected_error;
	atomic_long_t mesq_noop_unexpected_error;
	atomic_long_t mesq_noop_lb_overflow;
	atomic_long_t mesq_noop_qlimit_reached;
	atomic_long_t mesq_noop_amo_nacked;
	atomic_long_t mesq_noop_put_nacked;
	atomic_long_t mesq_noop_page_overflow;
};

enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
	cchop_deallocate, tfhop_write_only, tfhop_write_restart,
	tghop_invalidate, mcsop_last};

struct mcs_op_statistic {
	atomic_long_t	count;
	atomic_long_t	total;
	unsigned long	max;
};

extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];

#define OPT_DPRINT	1
#define OPT_STATS	2

#define IRQ_GRU			110	/* Starting IRQ number for interrupts */

/* Delay in jiffies between attempts to assign a GRU context */
#define GRU_ASSIGN_DELAY	((HZ * 20) / 1000)

/*
 * If a process has its context stolen, min delay in jiffies before trying to
 * steal a context from another process.
 */
#define GRU_STEAL_DELAY		((HZ * 200) / 1000)

#define STAT(id)	do {						\
				if (gru_options & OPT_STATS)		\
					atomic_long_inc(&gru_stats.id);	\
			} while (0)
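
/*
 * Illustrative sketch (not part of the driver): how a call site bumps a
 * named counter. gru_example_count_assign() is a hypothetical helper; the
 * real call sites live in the driver .c files. Each STAT() expands to a
 * cheap atomic increment that is skipped unless OPT_STATS is enabled.
 */
static inline void gru_example_count_assign(int assigned)
{
	if (assigned)
		STAT(assign_context);		/* gru_stats.assign_context++ */
	else
		STAT(assign_context_failed);
}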

#ifdef CONFIG_SGI_GRU_DEBUG
#define gru_dbg(dev, fmt, x...)						\
	do {								\
		if (gru_options & OPT_DPRINT)				\
			printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
	} while (0)
#else
#define gru_dbg(x...)
#endif

/*-----------------------------------------------------------------------------
 * ASID management
 */
#define MAX_ASID	0xfffff0
#define MIN_ASID	8
#define ASID_INC	8	/* number of regions */

/* Generate a GRU asid value from a GRU base asid & a virtual address. */
#define VADDR_HI_BIT		64
#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
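
/*
 * Illustrative sketch (hypothetical helper, not part of the driver API):
 * GRUREGION() selects bits 62:61 of the user virtual address, yielding a
 * region number 0..3. With base_asid == 0x100 and an address whose bits
 * 63:61 are 0b001, the region is 1 and the result is 0x101. Base asids
 * advance in steps of ASID_INC, so the per-region values never collide.
 */
static inline unsigned int gru_example_asid(unsigned int base_asid,
					    unsigned long vaddr)
{
	return GRUASID(base_asid, vaddr);	/* base asid + region(0..3) */
}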

/*------------------------------------------------------------------------------
 * TLB handling
 *
 * This structure is pointed to from the mm_struct via the notifier pointer.
 * There is one of these per address space.
 */
struct gru_mm_tracker {				/* pack to reduce size */
	unsigned int		mt_asid_gen:24;	/* ASID wrap count */
	unsigned int		mt_asid:24;	/* current base ASID for gru */
	unsigned short		mt_ctxbitmap:16;/* bitmap of contexts using
						   asid */
} __attribute__ ((packed));

struct gru_mm_struct {
	struct mmu_notifier	ms_notifier;
	spinlock_t		ms_asid_lock;	/* protects ASID assignment */
	atomic_t		ms_range_active;/* num range_invals active */
	wait_queue_head_t	ms_wait_queue;
	DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
	struct gru_mm_tracker	ms_asids[GRU_MAX_GRUS];
};

/*
 * One of these structures is allocated when a GSEG is mmaped. The
 * structure is pointed to by the vma->vm_private_data field in the vma struct.
 */
struct gru_vma_data {
	spinlock_t		vd_lock;	/* Serialize access to vma */
	struct list_head	vd_head;	/* head of linked list of gts */
	long			vd_user_options;/* misc user option flags */
	int			vd_cbr_au_count;
	int			vd_dsr_au_count;
	unsigned char		vd_tlb_preload_count;
};

/*
 * One of these is allocated for each thread accessing a mmaped GRU. A linked
 * list of these structures is hung off the struct gru_vma_data in the mm_struct.
 */
struct gru_thread_state {
	struct list_head	ts_next;	/* list - head at vma-private */
	struct mutex		ts_ctxlock;	/* load/unload CTX lock */
	struct mm_struct	*ts_mm;		/* mm currently mapped to
						   context */
	struct vm_area_struct	*ts_vma;	/* vma of GRU context */
	struct gru_state	*ts_gru;	/* GRU where the context is
						   loaded */
	struct gru_mm_struct	*ts_gms;	/* asid & ioproc struct */
	unsigned char		ts_tlb_preload_count; /* TLB preload pages */
	unsigned long		ts_cbr_map;	/* map of allocated CBRs */
	unsigned long		ts_dsr_map;	/* map of allocated DATA
						   resources */
	unsigned long		ts_steal_jiffies;/* jiffies when context last
						    stolen */
	long			ts_user_options;/* misc user option flags */
	pid_t			ts_tgid_owner;	/* task that is using the
						   context - for migration */
	short			ts_user_blade_id;/* user selected blade */
	char			ts_user_chiplet_id;/* user selected chiplet */
	unsigned short		ts_sizeavail;	/* Pagesizes in use */
	int			ts_tsid;	/* thread that owns the
						   structure */
	int			ts_tlb_int_select;/* target cpu if interrupts
						     enabled */
	int			ts_ctxnum;	/* context number where the
						   context is loaded */
	atomic_t		ts_refcnt;	/* reference count GTS */
	unsigned char		ts_dsr_au_count;/* Number of DSR resources
						   required for context */
	unsigned char		ts_cbr_au_count;/* Number of CBR resources
						   required for context */
	char			ts_cch_req_slice;/* CCH packet slice */
	char			ts_blade;	/* If >= 0, migrate context if
						   ref from different blade */
	char			ts_force_cch_reload;
	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
							  allocated CB */
	int			ts_data_valid;	/* Indicates if ts_gdata has
						   valid data */
	struct gru_gseg_statistics ustats;	/* User statistics */
	unsigned long		ts_gdata[0];	/* save area for GRU data (CB,
						   DS, CBE) */
};

/*
 * Threaded programs actually allocate an array of GSEGs when a context is
 * created. Each thread uses a separate GSEG. TSID is the index into the GSEG
 * array.
 */
#define TSID(a, v)		(((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
#define UGRUADDR(gts)		((gts)->ts_vma->vm_start +		\
					(gts)->ts_tsid * GRU_GSEG_PAGESIZE)

#define NULLCTX			(-1)	/* if context not loaded into GRU */
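
/*
 * Worked example (hypothetical values, not from the driver): if a threaded
 * program mmaps an array of 4 GSEGs at vm_start, an access to
 * vm_start + 2 * GRU_GSEG_PAGESIZE has TSID 2 (the third GSEG), and
 * UGRUADDR() recomputes that address from ts_vma/ts_tsid when the GSEG
 * must be located again.
 */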

/*-----------------------------------------------------------------------------
 * GRU State
 */

/*
 * One of these exists for each GRU chiplet.
 */
struct gru_state {
	struct gru_blade_state	*gs_blade;		/* GRU state for entire
							   blade */
	unsigned long		gs_gru_base_paddr;	/* Physical address of
							   gru segments (64) */
	void			*gs_gru_base_vaddr;	/* Virtual address of
							   gru segments (64) */
	unsigned short		gs_gid;			/* unique GRU number */
	unsigned short		gs_blade_id;		/* blade of GRU */
	unsigned char		gs_chiplet_id;		/* blade chiplet of GRU */
	unsigned char		gs_tgh_local_shift;	/* used to pick TGH for
							   local flush */
	unsigned char		gs_tgh_first_remote;	/* starting TGH# for
							   remote flush */
	spinlock_t		gs_asid_lock;		/* lock used for
							   assigning asids */
	spinlock_t		gs_lock;		/* lock used for
							   assigning contexts */

	/* -- the following are protected by the gs_asid_lock spinlock ---- */
	unsigned int		gs_asid;		/* Next available ASID */
	unsigned int		gs_asid_limit;		/* Limit of available
							   ASIDs */
	unsigned int		gs_asid_gen;		/* asid generation.
							   Inc on wrap */

	/* --- the following fields are protected by the gs_lock spinlock --- */
	unsigned long		gs_context_map;		/* bitmap to manage
							   contexts in use */
	unsigned long		gs_cbr_map;		/* bitmap to manage CB
							   resources */
	unsigned long		gs_dsr_map;		/* bitmap used to manage
							   DATA resources */
	unsigned int		gs_reserved_cbrs;	/* Number of kernel-
							   reserved cbrs */
	unsigned int		gs_reserved_dsr_bytes;	/* Bytes of kernel-
							   reserved dsrs */
	unsigned short		gs_active_contexts;	/* number of contexts
							   in use */
	struct gru_thread_state	*gs_gts[GRU_NUM_CCH];	/* GTS currently using
							   the context */
	int			gs_irq[GRU_NUM_TFM];	/* Interrupt irqs */
};

/*
 * This structure contains the GRU state for all the GRUs on a blade.
 */
struct gru_blade_state {
	void			*kernel_cb;	/* First kernel
						   reserved cb */
	void			*kernel_dsr;	/* First kernel
						   reserved DSR */
	struct rw_semaphore	bs_kgts_sema;	/* lock for kgts */
	struct gru_thread_state *bs_kgts;	/* GTS for kernel use */

	/* ---- the following are used for managing kernel async GRU CBRs --- */
	int			bs_async_dsr_bytes;	/* DSRs for async */
	int			bs_async_cbrs;		/* CBRs AU for async */
	struct completion	*bs_async_wq;

	/* ---- the following are protected by the bs_lock spinlock ---- */
	spinlock_t		bs_lock;	/* lock used for
						   stealing contexts */
	int			bs_lru_ctxnum;	/* STEAL - last context
						   stolen */
	struct gru_state	*bs_lru_gru;	/* STEAL - last gru
						   stolen */

	struct gru_state	bs_grus[GRU_CHIPLETS_PER_BLADE];
};

/*-----------------------------------------------------------------------------
 * Address Primitives
 */
#define get_tfm_for_cpu(g, c)						\
	((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c)))
#define get_tfh_by_index(g, i)						\
	((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i)))
#define get_tgh_by_index(g, i)						\
	((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i)))
#define get_cbe_by_index(g, i)						\
	((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\
			(i)))

/*-----------------------------------------------------------------------------
 * Useful Macros
 */

/* Given a blade# & chiplet#, get a pointer to the GRU */
#define get_gru(b, c)		(&gru_base[b]->bs_grus[c])

/* Number of bytes to save/restore when unloading/loading GRU contexts */
#define DSR_BYTES(dsr)		((dsr) * GRU_DSR_AU_BYTES)
#define CBR_BYTES(cbr)		((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2)

/* Convert a user CB number to the actual CBRNUM */
#define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \
				  * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE)
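
/*
 * Worked example (hypothetical values): CBRs are handed out in allocation
 * units of GRU_CBR_AU_SIZE. If GRU_CBR_AU_SIZE were 2 and
 * ts_cbr_idx = {3, 7}, user CB number 2 lands in AU 1 at offset 0, so
 * thread_cbr_number(gts, 2) == 7 * 2 + 0 == 14.
 */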

/* Convert a gid to a pointer to the GRU */
#define GID_TO_GRU(gid)							\
	(gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ?			\
		(&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]->		\
			bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) :	\
		NULL)

/* Scan all active GRUs in a GRU bitmap */
#define for_each_gru_in_bitmap(gid, map)				\
	for_each_set_bit((gid), (map), GRU_MAX_GRUS)

/* Scan all active GRUs on a specific blade */
#define for_each_gru_on_blade(gru, nid, i)				\
	for ((gru) = gru_base[nid]->bs_grus, (i) = 0;			\
			(i) < GRU_CHIPLETS_PER_BLADE;			\
			(i)++, (gru)++)

/* Scan all GRUs */
#define foreach_gid(gid)						\
	for ((gid) = 0; (gid) < gru_max_gids; (gid)++)

/* Scan all active GTSs on a gru. Note: must hold gs_lock to use this macro. */
#define for_each_gts_on_gru(gts, gru, ctxnum)				\
	for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++)		\
		if (((gts) = (gru)->gs_gts[ctxnum]))
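
/*
 * Usage sketch (illustrative, not a real call site): walk every loaded
 * context on a chiplet with gs_lock held, since gs_gts[] may otherwise
 * change underneath us. examine() is a hypothetical helper.
 *
 *	spin_lock(&gru->gs_lock);
 *	for_each_gts_on_gru(gts, gru, ctxnum)
 *		examine(gts, ctxnum);
 *	spin_unlock(&gru->gs_lock);
 */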

/* Scan each CBR whose bit is set in a TFM (or copy of) */
#define for_each_cbr_in_tfm(i, map)					\
	for_each_set_bit((i), (map), GRU_NUM_CBE)

/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */
#define for_each_cbr_in_allocation_map(i, map, k)			\
	for_each_set_bit((k), (map), GRU_CBR_AU)			\
		for ((i) = (k)*GRU_CBR_AU_SIZE;				\
				(i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)

/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
#define for_each_dsr_in_allocation_map(i, map, k)			\
	for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU)	\
		for ((i) = (k) * GRU_DSR_AU_CL;				\
				(i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
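
/*
 * Usage sketch (illustrative values): each set allocation-unit bit (k)
 * expands to its run of individual resources (i). If AU bit 1 is set in a
 * CBR map and GRU_CBR_AU_SIZE is 2, the loop visits CBRs 2 and 3.
 * init_cbr() is a hypothetical helper.
 *
 *	for_each_cbr_in_allocation_map(i, &gts->ts_cbr_map, k)
 *		init_cbr(i);
 */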

#define gseg_physical_address(gru, ctxnum)				\
		((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE)
#define gseg_virtual_address(gru, ctxnum)				\
		((gru)->gs_gru_base_vaddr + ctxnum * GRU_GSEG_STRIDE)

/*-----------------------------------------------------------------------------
 * Lock / Unlock GRU handles
 *	Use the "delresp" bit in the handle as a "lock" bit.
 *	If set, the handle is locked.
 */
/* Lock hierarchy checking enabled only in emulator */

/* 0 = lock failed, 1 = locked */
static inline int __trylock_handle(void *h)
{
	return !test_and_set_bit(1, h);
}

static inline void __lock_handle(void *h)
{
	while (test_and_set_bit(1, h))
		cpu_relax();
}

static inline void __unlock_handle(void *h)
{
	clear_bit(1, h);
}

static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch)
{
	return __trylock_handle(cch);
}

static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
{
	__lock_handle(cch);
}

static inline void unlock_cch_handle(struct gru_context_configuration_handle *cch)
{
	__unlock_handle(cch);
}

static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
	__lock_handle(tgh);
}

static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
	__unlock_handle(tgh);
}

static inline int is_kernel_context(struct gru_thread_state *gts)
{
	return !gts->ts_mm;
}

/*
 * The following are for Nehalem-EX. A more general scheme is needed for
 * other processor types.
 */
#define UV_MAX_INT_CORES		8
#define uv_cpu_socket_number(p)		((cpu_physical_id(p) >> 5) & 1)
#define uv_cpu_ht_number(p)		(cpu_physical_id(p) & 1)
#define uv_cpu_core_number(p)		(((cpu_physical_id(p) >> 2) & 4) |	\
					((cpu_physical_id(p) >> 1) & 3))
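
/*
 * Worked example (hypothetical APIC id): for cpu_physical_id(p) == 0x16
 * (0b10110), uv_cpu_socket_number() = (0b10110 >> 5) & 1 = 0,
 * uv_cpu_ht_number() = 0b10110 & 1 = 0, and uv_cpu_core_number() =
 * ((0b10110 >> 2) & 4) | ((0b10110 >> 1) & 3) = 4 | 3 = 7.
 */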

/*-----------------------------------------------------------------------------
 * Function prototypes & externs
 */
struct gru_unload_context_req;

extern const struct vm_operations_struct gru_vm_ops;
extern struct device *grudev;

extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
				int tsid);
extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
				*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
				*vma, int tsid);
extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
extern void gru_load_context(struct gru_thread_state *gts);
extern void gru_steal_context(struct gru_thread_state *gts);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
extern int gru_update_cch(struct gru_thread_state *gts);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(void);
extern void gru_kservices_exit(void);
extern irqreturn_t gru0_intr(int irq, void *dev_id);
extern irqreturn_t gru1_intr(int irq, void *dev_id);
extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
extern int gru_dump_chiplet_request(unsigned long arg);
extern long gru_get_gseg_statistics(unsigned long arg);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
extern void gru_check_context_placement(struct gru_thread_state *gts);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
extern int gru_proc_init(void);
extern void gru_proc_exit(void);

extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
		int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
		int dsr_au_count, char *dsmap);
extern vm_fault_t gru_fault(struct vm_fault *vmf);
extern struct gru_mm_struct *gru_register_mmu_notifier(void);
extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);

extern int gru_ktest(unsigned long arg);
extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
					unsigned long len);

extern unsigned long gru_options;

#endif /* __GRUTABLES_H__ */
664 #endif /* __GRUTABLES_H__ */