#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"

struct nvkm_memx {
	struct nvkm_pmu *pmu;
	u32 base;
	u32 size;
	struct {
		u32 mthd;
		u32 size;
		u32 data[64];
	} c;
};
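/* Script commands are accumulated one method at a time in the 'c' buffer
 * above; memx_out() flushes the pending method to the PMU by writing a
 * header word (size << 16 | mthd) followed by the payload words. */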
static void
memx_out(struct nvkm_memx *memx)
{
	struct nvkm_device *device = memx->pmu->subdev.device;
	int i;

	if (memx->c.mthd) {
		nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
		for (i = 0; i < memx->c.size; i++)
			nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
		memx->c.mthd = 0;
		memx->c.size = 0;
	}
}
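/* Queue a single script command: flush first if the buffer would overflow,
 * or if a different method is already pending, then append the payload. */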
static void
memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
{
	if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
	    (memx->c.mthd && memx->c.mthd != mthd))
		memx_out(memx);
	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
	memx->c.size += size;
	memx->c.mthd  = mthd;
}
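/* Begin recording a script: ask the MEMX process where its data segment
 * lives, allocate the recorder, and acquire data segment access. */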
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_memx *memx;
	u32 reply[2];
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_DATA, 0);
	if (ret)
		return ret;

	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
	if (!memx)
		return -ENOMEM;
	memx->pmu = pmu;
	memx->base = reply[0];
	memx->size = reply[1];

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000003);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
	return 0;
}
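/* Finish recording: flush anything still buffered, release the data
 * segment, and (if 'exec') ask the MEMX process to run the script. */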
int
nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
	struct nvkm_memx *memx = *pmemx;
	struct nvkm_pmu *pmu = memx->pmu;
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 finish, reply[2];

	/* flush the cache... */
	memx_out(memx);

	/* release data segment access */
	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* call MEMX process to execute the script, and wait for reply */
	if (exec) {
		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
			      memx->base, finish);
		nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
			   reply[0], reply[1]);
	}

	kfree(memx);
	return 0;
}
void
nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
}
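/* Emit a register-poll command; flushed immediately, since the fuc can't
 * handle more than one wait per packet. */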
void
nvkm_memx_wait(struct nvkm_memx *memx,
	       u32 addr, u32 mask, u32 data, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
		   addr, mask, data, nsec);
	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
	memx_out(memx); /* fuc can't handle multiple */
}
void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
	memx_out(memx); /* fuc can't handle multiple */
}
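/* Emit a wait-for-vblank command. Only chipsets below 0xd0 are scanned
 * for an active head; if none is found the wait is skipped entirely. */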
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
	struct nvkm_subdev *subdev = &memx->pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	if (device->chipset < 0xd0) {
		heads = nvkm_rd32(device, 0x610050);
		for (i = 0; i < 2; i++) {
			/* Heuristic: sync to head with biggest resolution */
			if (heads & (2 << (i << 3))) {
				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
				y = (x & 0xffff0000) >> 16;
				x &= 0x0000ffff;
				if ((x * y) > px) {
					px = (x * y);
					head_sync = i;
				}
			}
		}
	}

	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
	memx_out(memx); /* fuc can't handle multiple */
}
void
nvkm_memx_train(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, "   MEM TRAIN\n");
	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
}
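/* Fetch memory-training results back from the PMU: query where the
 * training data lives, then read it out word by word via the data port. */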
int
nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
{
	struct nvkm_device *device = pmu->subdev.device;
	u32 reply[2], base, size, i;
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_TRAIN, 0);
	if (ret)
		return ret;

	base = reply[0];
	size = reply[1] >> 2;
	if (size > rsize)
		return -ENOMEM;

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);

	for (i = 0; i < size; i++)
		res[i] = nvkm_rd32(device, 0x10a1c4);

	return 0;
}
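/* MEMX_ENTER/MEMX_LEAVE commands bracket a script region during which
 * the PMU blocks host access. */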
void
nvkm_memx_block(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, "   HOST BLOCKED\n");
	memx_cmd(memx, MEMX_ENTER, 0, NULL);
}
void
nvkm_memx_unblock(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, "   HOST UNBLOCKED\n");
	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
}
#endif