// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);
/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size)
{
	CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		CTB_OWNER_HOST,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}
/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ct->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
	if (unlikely(err)) {
		DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
		return err;
	}

	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ct->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;
}
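/*
 * For illustration only: with a 4 KiB page (an assumption; PAGE_SIZE is
 * architecture dependent), the loop above leaves the blob laid out as:
 *
 *	ct->ctbs[CTB_SEND].desc = blob + 0
 *	ct->ctbs[CTB_RECV].desc = blob + 1024
 *	ct->ctbs[CTB_SEND].cmds = blob + 2048
 *	ct->ctbs[CTB_RECV].cmds = blob + 3072
 */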
/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base;
	int err;
	int i;

	GEM_BUG_ON(ct->enabled);

	/* vma should be already allocated and mapped */
	GEM_BUG_ON(!ct->vma);

	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	DRM_ERROR("CT: can't open channel; err=%d\n", err);
	return err;
}
/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_running(guc)) {
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_SEND);
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_RECV);
	}
}
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.next_fence;
}
/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
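/*
 * A minimal sketch of how such a request is assembled, mirroring what
 * ctb_write() below does; the GUC_CT_MSG_* shift and flag values are
 * defined elsewhere and only referenced here, and action_code/request_data
 * are placeholder names:
 *
 *	u32 msg[3];
 *	msg[0] = (2 << GUC_CT_MSG_LEN_SHIFT) |		// len = 2: fence + 1 data dword
 *		 GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *		 (action_code << GUC_CT_MSG_ACTION_SHIFT);
 *	msg[1] = fence;					// payload dword 0
 *	msg[2] = request_data;				// payload dword 1
 */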
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}
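/*
 * Worked example of the free-space check in ctb_write() above, under
 * assumed numbers: with size = 256 dwords, head = 250 and tail = 10 the
 * ring has wrapped, so used = (256 - 250) + 10 = 16 dwords; a message
 * then fits only if used + len + 1 < size, the extra dword being the
 * fence.
 */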
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update CT buffer descriptor with new fence and status
 * after processing the command identified by the fence. Wait for
 * specified fence and then read from the descriptor status of the
 * command.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait here and
 * check for valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}
static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	mutex_lock(&guc->send_mutex);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}
/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
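/*
 * A minimal sketch of how ct_handle_response() below takes such a
 * message apart (names match the locals used there):
 *
 *	u32 len     = ct_header_get_len(msg[0]);	// payload length in dwords
 *	u32 fence   = msg[1];				// matches a pending request
 *	u32 status  = msg[2];				// RESPONSE-formatted status
 *	u32 datalen = len - 2;				// response specific data dwords
 */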
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->requests.lock);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->requests.lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}
static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}
static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}
/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
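/*
 * Unlike a response, a GuC to Host request carries no fence or status;
 * ct_handle_request() below only copies the message, and the worker later
 * decodes it roughly like this (a sketch, not a verbatim quote):
 *
 *	u32 action = ct_header_get_action(msg[0]);
 *	u32 len    = ct_header_get_len(msg[0]);		// payload length in dwords
 *	const u32 *payload = &msg[1];			// request specific data
 */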
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}

	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}
/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}