// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
};

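/*
 * Read one packet from a shared memory queue into 'data'. Returns the packet
 * size in dwords, or 0 if the queue is empty.
 */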
static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
		u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * against the failure.
	 */
	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

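/*
 * Copy one packet into a shared memory queue and ring the GMU doorbell.
 * Returns -ENOSPC if there is not enough room left in the queue.
 */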
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

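	/* Ring the doorbell interrupt to let the GMU know a message is waiting */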
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

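/*
 * Wait for the GMU to raise the response interrupt, then walk the response
 * queue looking for the ack that matches 'seqnum'.
 */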
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

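	/* Drain the response queue until we find the ack for our sequence number */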
	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");
			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

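/*
 * Fill in the message header, write the message to the command queue and
 * block until the GMU acknowledges it.
 */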
static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
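	/*
	 * seqnum goes in bits 31:20, HFI_MSG_CMD in 19:16, the size in dwords
	 * in 15:8 and the message id in 7:0
	 */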
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

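/* Pass the debug buffer location and the requested boot state to the GMU */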
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug->iova;
	msg.dbg_buffer_size = (u32) gmu->debug->size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.1 */
	msg.supported_version = (1 << 28) | (1 << 16);
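	/* i.e. major version 1 in bits 31:28 and minor version 1 in bits 27:16 */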

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

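/* Send the GPU and GMU frequency tables along with their ARC votes */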
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

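	/* Frequencies are tracked in Hz; the table takes kHz, hence the divide by 1000 */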
	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

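/* Send the fixed bus vote tables for the DDR and CNOC paths */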
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };

	/*
	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
	 * does need at least one entry in the list because it might be accessed
	 * when the GMU is shutting down. Send a single "off" entry.
	 */
	msg.bw_level_num = 1;

	msg.ddr_cmds_num = 3;
	msg.ddr_wait_bitmask = 0x07;

	msg.ddr_cmds_addrs[0] = 0x50000;
	msg.ddr_cmds_addrs[1] = 0x5005c;
	msg.ddr_cmds_addrs[2] = 0x5000c;

	msg.ddr_cmds_data[0][0] = 0x40000000;
	msg.ddr_cmds_data[0][1] = 0x40000000;
	msg.ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. The values for the sdm845 GMU are
	 * known and fixed so we can hard code them.
	 */
	msg.cnoc_cmds_num = 3;
	msg.cnoc_wait_bitmask = 0x05;

	msg.cnoc_cmds_addrs[0] = 0x50034;
	msg.cnoc_cmds_addrs[1] = 0x5007c;
	msg.cnoc_cmds_addrs[2] = 0x5004c;

	msg.cnoc_cmds_data[0][0] = 0x40000000;
	msg.cnoc_cmds_data[0][1] = 0x00000000;
	msg.cnoc_cmds_data[0][2] = 0x40000000;

	msg.cnoc_cmds_data[1][0] = 0x60000001;
	msg.cnoc_cmds_data[1][1] = 0x20000001;
	msg.cnoc_cmds_data[1][2] = 0x60000001;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

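/*
 * Run the HFI handshake with the GMU: init, version exchange, perf and
 * bandwidth tables, and finally the test message that closes the sequence.
 */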
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the sequence, but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware.
	 */

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until the
	 * next boot.
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

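/* Reset the queue indices after GMU shutdown, warning if anything was left unread */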
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;
	}
}

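/* Set up the host side bookkeeping and the shared memory header for one queue */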
static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
	struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
	u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

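/*
 * Carve the HFI buffer into the queue table header, the per-queue headers and
 * one 4K region of queue data per queue.
 */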
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, 4);
}