/*
 * Copyright 2008 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);
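
/* Resolve the relocation chunk: look up every GEM handle it references,
 * reuse the entry for duplicate handles, and queue the resulting buffer
 * objects on p->validated so they can be reserved and validated in one pass.
 */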
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	bool duplicate;
	unsigned i, j;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		/* a handle already seen earlier in the chunk reuses its entry */
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}

		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}
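
/* Map the ring id requested in the flags chunk to an internal ring index
 * and record the submission priority.  On CHIP_TAHITI and newer, compute
 * submissions are steered to one of the compute rings; older parts fall
 * back to the GFX ring.
 */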
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	}
	return 0;
}
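
/* For every validated buffer that still carries a TTM sync object, keep the
 * most recent fence per ring in ib.sync_to so the IB waits on all the rings
 * it depends on before it runs.
 */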
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_fence *a, *b;

		if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
			continue;

		a = p->relocs[i].robj->tbo.sync_obj;
		b = p->ib.sync_to[a->ring];
		p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
	}
}
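
/* First stage of the CS ioctl: copy the chunk table and chunk headers in
 * from userspace, remember where the IB, CONST_IB, relocation and flags
 * chunks live, pull the flags/ring/priority words, and pre-allocate the
 * bounce pages used for IB copies on AGP.
 */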
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	INIT_LIST_HEAD(&p->validated);
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if ((p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				/* free via chunk_ib_idx (the loop index i has
				 * run past the chunk array) and clear so
				 * parser_fini doesn't free them again */
				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}
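
/* After a VM submission, attach the IB fence to every bo_va on the
 * validated list so the virtual address mappings are not torn down while
 * the GPU may still be using them.
 */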
static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
				  struct radeon_fence *fence)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_list *lobj;

	if (parser->chunk_ib_idx == -1) {
		return;
	}
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
		return;
	}

	list_for_each_entry(lobj, &parser->validated, tv.head) {
		struct radeon_bo_va *bo_va;
		struct radeon_bo *rbo = lobj->bo;

		bo_va = radeon_bo_va(rbo, vm);
		radeon_fence_unref(&bo_va->fence);
		bo_va->fence = radeon_fence_ref(fence);
	}
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		/* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
		radeon_bo_vm_fence_va(parser, parser->ib.fence);
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
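
/* Submit the command stream for the non-VM path: pull the user IB in page
 * by page (see radeon_cs_update_pages/radeon_cs_finish_pages), run the
 * per-ASIC packet checker, then hand the IB to the scheduler.
 */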
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	parser->ib.vm_id = 0;
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
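
/* Refresh the VM page table entries for every buffer on the validated list
 * so the GPU virtual addresses point at the buffers' current placement
 * before the IB is executed.
 */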
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}
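
/* Submit the command stream for the VM path: copy the (optional) CONST_IB
 * and the IB in from userspace, run the ring-level IB checker, bind the VM,
 * update the page tables and schedule both IBs under the vm_manager lock.
 */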
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	r = radeon_vm_bind(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);

	parser->ib.vm_id = vm->id;
	/* ib pool is bound at 0 in virtual address space,
	 * so gpu_addr is the offset inside the pool bo
	 */
	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		parser->const_ib.vm_id = vm->id;
		/* ib pool is bound at 0 in virtual address space,
		 * so gpu_addr is the offset inside the pool bo
		 */
		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

out:
	if (!r) {
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(parser->ib.fence);
	}
	mutex_unlock(&vm->mutex);
	mutex_unlock(&rdev->vm_manager.lock);
	return r;
}
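
/* If the submission failed because of a GPU lockup, reset the GPU and ask
 * userspace to resubmit the command stream.
 */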
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
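
/* Main entry point for the CS ioctl: take the exclusive lock, build the
 * parser state, resolve relocations, then run whichever of the VM or non-VM
 * submission paths applies, cleaning up and checking for lockups on every
 * exit path.
 */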
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
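
/* Copy any pages of the user IB that the parser has not pulled in yet,
 * from the page after last_copied_page up to and including the final,
 * possibly partial, page.
 */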
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		/* the last page may only be partially filled */
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}
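
/* Make page pg_idx of the user IB available to the parser: copy all the
 * pages before it straight into the IB, then copy pg_idx either into the
 * IB directly (non-AGP) or into one of the two kpage bounce buffers (AGP),
 * replacing the older of the two.
 */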
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int new_page;
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	/* the last page may only be partially filled */
	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}
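
/* Return dword idx of the IB, faulting in the page that holds it through
 * radeon_cs_update_pages() when neither cached kpage contains it.
 */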
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}