/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_guc_ct.h"
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};
enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };
static void ct_incoming_request_worker_func(struct work_struct *w);
/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;

	spin_lock_init(&ct->lock);
	INIT_LIST_HEAD(&ct->pending_requests);
	INIT_LIST_HEAD(&ct->incoming_requests);
	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}
static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}
static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
	return ctch->vma != NULL;
}
static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ctch->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma);
err_out:
	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			ctch->owner, err);
	return err;
}
static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->vma);

	i915_gem_object_unpin_map(ctch->vma->obj);
	i915_vma_unpin_and_release(&ctch->vma);
}
static int ctch_open(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
			ctch->owner, yesno(ctch_is_open(ctch)));

	if (!ctch->vma) {
		err = ctch_init(guc, ctch);
		if (unlikely(err))
			goto err_out;
		GEM_BUG_ON(!ctch->vma);
	}

	/* vma should be already allocated and map'ed */
	base = intel_guc_ggtt_offset(guc, ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_fini;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
	ctch_fini(guc, ctch);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}
static void ctch_close(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch_is_open(ctch));

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
	ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}
/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
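/*
 * Illustrative sketch, not part of the original driver: how a request
 * header (DW0 above) is packed, mirroring the logic in ctb_write() below
 * and reusing the GUC_CT_MSG_* macros this file already depends on.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static u32 example_pack_ct_header(u32 len, u32 action_code, bool want_status)
{
	return (len << GUC_CT_MSG_LEN_SHIFT) |		/* payload dwords */
	       GUC_CT_MSG_WRITE_FENCE_TO_DESC |		/* fence to desc */
	       (want_status ? GUC_CT_MSG_SEND_STATUS : 0) |
	       (action_code << GUC_CT_MSG_ACTION_SHIFT);
}
#endif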
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update CT buffer descriptor with new fence and status
 * after processing the command identified by the fence. Wait for
 * specified fence and then read from the descriptor status of the
 * command.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait here and
 * check for valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}
static int ctch_send(struct intel_guc_ct *ct,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *response_buf,
		     u32 response_buf_size,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch_is_open(ctch));
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ctch_get_next_fence(ctch);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request.link, &ct->pending_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->lock, flags);

	return err;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc_ct *ct = &guc->ct;
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	u32 status = ~0; /* undefined */
	int ret;

	mutex_lock(&guc->send_mutex);

	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
			&status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}
/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
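/*
 * Illustrative sketch, not part of the original driver: how the fields of
 * a response map onto msg[], mirroring ct_handle_response() below. Kept
 * under #if 0 so it is never compiled.
 */
#if 0
	u32 fence = msg[1];		/* matches the fence of our request */
	u32 status = msg[2];		/* INTEL_GUC_MSG_IS_RESPONSE() holds */
	u32 datalen = ct_header_get_len(msg[0]) - 2;	/* payload dwords */
	const u32 *data = &msg[3];	/* response specific data, if any */
#endif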
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->lock);
	list_for_each_entry(req, &ct->pending_requests, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}
static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		if (unlikely(len < 1))
			goto fail_unexpected;
		intel_guc_to_host_process_recv_msg(guc, *payload);
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->lock, flags);
	request = list_first_entry_or_null(&ct->incoming_requests,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->worker);
}
/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
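/*
 * Illustrative sketch, not part of the original driver: decoding an
 * incoming request header with the helpers defined earlier, as done in
 * ct_process_incoming_requests() above. Kept under #if 0 so it is never
 * compiled.
 */
#if 0
	u32 header = msg[0];
	u32 action = ct_header_get_action(header);	/* request code */
	u32 len = ct_header_get_len(header);		/* payload dwords */
	const u32 *payload = &msg[1];			/* len dwords follow */
#endif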
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}

	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->lock, flags);
	list_add_tail(&request->link, &ct->incoming_requests);
	spin_unlock_irqrestore(&ct->lock, flags);

	queue_work(system_unbound_wq, &ct->worker);
	return 0;
}
static void ct_process_host_channel(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (!ctch_is_open(ctch))
		return;

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}
/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
	struct intel_guc_ct *ct = &guc->ct;

	ct_process_host_channel(ct);
}
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;
	int err;

	GEM_BUG_ON(!HAS_GUC_CT(i915));

	err = ctch_open(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	guc->handler = intel_guc_to_host_event_handler_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}
/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &ct->host_channel;

	GEM_BUG_ON(!HAS_GUC_CT(i915));

	if (!ctch_is_open(ctch))
		return;

	ctch_close(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}