/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)    DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)    do { } while (0)
#endif
struct ct_request {
        struct list_head link;
        u32 fence;
        u32 status;
        u32 response_len;
        u32 *response_buf;
};

struct ct_incoming_request {
        struct list_head link;
        u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);
/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
        /* we're using static channel owners */
        ct->host_channel.owner = CTB_OWNER_HOST;

        spin_lock_init(&ct->lock);
        INIT_LIST_HEAD(&ct->pending_requests);
        INIT_LIST_HEAD(&ct->incoming_requests);
        INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
        return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
        switch (type) {
        case INTEL_GUC_CT_BUFFER_TYPE_SEND:
                return "SEND";
        case INTEL_GUC_CT_BUFFER_TYPE_RECV:
                return "RECV";
        default:
                return "<invalid>";
        }
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
                                    u32 cmds_addr, u32 size, u32 owner)
{
        CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
                        desc, cmds_addr, size, owner);
        memset(desc, 0, sizeof(*desc));
        desc->addr = cmds_addr;
        desc->size = size;
        desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
        CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
                        desc, desc->head, desc->tail);
        desc->head = 0;
        desc->tail = 0;
        desc->is_in_error = 0;
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
                                         u32 desc_addr,
                                         u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
                desc_addr,
                sizeof(struct guc_ct_buffer_desc),
                type
        };
        int err;

        /* Can't use generic send(), CT registration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: register %s buffer failed; err=%d\n",
                          guc_ct_buffer_type_to_str(type), err);
        return err;
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
                                           u32 owner,
                                           u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
                owner,
                type
        };
        int err;

        /* Can't use generic send(), CT deregistration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
                          guc_ct_buffer_type_to_str(type), owner, err);
        return err;
}
static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
        return ctch->vma != NULL;
}
static int ctch_init(struct intel_guc *guc,
                     struct intel_guc_ct_channel *ctch)
{
        struct i915_vma *vma;
        void *blob;
        int err;
        int i;

        GEM_BUG_ON(ctch->vma);

        /* We allocate 1 page to hold both descriptors and both buffers.
         *       ___________.....................
         *      |desc (SEND)|                   :
         *      |___________|                   PAGE/4
         *      :___________....................:
         *      |desc (RECV)|                   :
         *      |___________|                   PAGE/4
         *      :_______________________________:
         *      |cmds (SEND)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *      |cmds (RECV)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *
         * Each message can use a maximum of 32 dwords and we don't expect to
         * have more than 1 in flight at any time, so we have enough space.
         * Some logic further ahead will rely on the fact that there is only 1
         * page and that it is always mapped, so if the size is changed the
         * other code will need updating as well.
         */
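        /*
         * Illustrative sketch (assuming a 4KiB page): with the pointer
         * arithmetic used below, the blob is carved up as
         *
         *      ctbs[CTB_SEND].desc = blob + 0x0000
         *      ctbs[CTB_RECV].desc = blob + 0x0400
         *      ctbs[CTB_SEND].cmds = blob + 0x0800
         *      ctbs[CTB_RECV].cmds = blob + 0x0c00
         *
         * i.e. both descriptors live in the first half of the page and each
         * cmds buffer gets PAGE_SIZE/4 (1KiB, 256 dwords) in the second half.
         */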
        /* allocate vma */
        vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_out;
        }
        ctch->vma = vma;

        /* map first page */
        blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(blob)) {
                err = PTR_ERR(blob);
                goto err_vma;
        }
        CT_DEBUG_DRIVER("CT: vma base=%#x\n",
                        intel_guc_ggtt_offset(guc, ctch->vma));

        /* store pointers to desc and cmds */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
                ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
        }

        return 0;

err_vma:
        i915_vma_unpin_and_release(&ctch->vma, 0);
err_out:
        CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
                        ctch->owner, err);
        return err;
}
static void ctch_fini(struct intel_guc *guc,
                      struct intel_guc_ct_channel *ctch)
{
        i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
static int ctch_open(struct intel_guc *guc,
                     struct intel_guc_ct_channel *ctch)
{
        u32 base;
        int err;
        int i;

        CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
                        ctch->owner, yesno(ctch_is_open(ctch)));

        if (!ctch->vma) {
                err = ctch_init(guc, ctch);
                if (unlikely(err))
                        goto err_out;
                GEM_BUG_ON(!ctch->vma);
        }

        /* vma should be already allocated and mapped */
        base = intel_guc_ggtt_offset(guc, ctch->vma);

        /* (re)initialize descriptors
         * cmds buffers are in the second half of the blob page
         */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
                                        base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
                                        PAGE_SIZE/4,
                                        ctch->owner);
        }

        /* register buffers, starting with RECV buffer
         * descriptors are in first half of the blob
         */
        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_RECV,
                                            INTEL_GUC_CT_BUFFER_TYPE_RECV);
        if (unlikely(err))
                goto err_fini;

        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_SEND,
                                            INTEL_GUC_CT_BUFFER_TYPE_SEND);
        if (unlikely(err))
                goto err_deregister;

        return 0;

err_deregister:
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
        ctch_fini(guc, ctch);
err_out:
        DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
        return err;
}
static void ctch_close(struct intel_guc *guc,
                       struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(!ctch_is_open(ctch));

        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_SEND);
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
        ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
        /* For now it's trivial */
        return ++ctch->next_fence;
}
/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
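/*
 * Illustrative sketch (example values, not from the original sources):
 * with the layout above and the header encoding used by ctb_write()
 * below, a fenced request with a made-up action code 0x123 and a single
 * parameter dword 0x42 would occupy three dwords in the SEND buffer:
 *
 *      msg[0] = (2 << GUC_CT_MSG_LEN_SHIFT) |          (len: fence + 1 dw)
 *               GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *               (0x123 << GUC_CT_MSG_ACTION_SHIFT);
 *      msg[1] = fence;
 *      msg[2] = 0x42;
 *
 * Note that the header's len field counts the payload (fence included)
 * but not the header dword itself.
 */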
static int ctb_write(struct intel_guc_ct_buffer *ctb,
                     const u32 *action,
                     u32 len /* in dwords */,
                     u32 fence,
                     bool want_response)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 used;                       /* in dwords */
        u32 header;
        u32 *cmds = ctb->cmds;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);

        /*
         * tail == head condition indicates empty. GuC FW does not support
         * using up the entire buffer to get tail == head meaning full.
         */
        if (tail < head)
                used = (size - head) + tail;
        else
                used = tail - head;
        /* make sure there is a space including extra dw for the fence */
        if (unlikely(used + len + 1 >= size))
                return -ENOSPC;
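        /*
         * Worked example (illustrative numbers): with size = 256 dwords,
         * head = 250 and tail = 10 the ring has wrapped, so
         * used = (256 - 250) + 10 = 16 dwords. A request with len = 4
         * then needs used + len + 1 = 21 < 256 slots, where the extra
         * dword accounts for the fence.
         */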
        /*
         * Write the message. The format is the following:
         * DW0: header (including action code)
         * DW1: fence
         * DW2+: action data
         */
        header = (len << GUC_CT_MSG_LEN_SHIFT) |
                 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
                 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
                 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

        CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
                        4, &header, 4, &fence,
                        4 * (len - 1), &action[1]);

        cmds[tail] = header;
        tail = (tail + 1) % size;

        cmds[tail] = fence;
        tail = (tail + 1) % size;

        for (i = 1; i < len; i++) {
                cmds[tail] = action[i];
                tail = (tail + 1) % size;
        }

        /* now update desc tail (back in bytes) */
        desc->tail = tail * 4;
        GEM_BUG_ON(desc->tail > desc->size);

        return 0;
}
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:       buffer descriptor
 * @fence:      response fence
 * @status:     placeholder for status
 *
 * GuC will update CT buffer descriptor with new fence and status
 * after processing the command identified by the fence. Wait for
 * specified fence and then read from the descriptor status of the
 * command.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 * *    -EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
                                    u32 fence,
                                    u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done (READ_ONCE(desc->fence) == fence)
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err)) {
                DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
                          fence, desc->fence);

                if (WARN_ON(desc->is_in_error)) {
                        /* Something went wrong with the messaging, try to reset
                         * the buffer and hope for the best
                         */
                        guc_ct_buffer_desc_reset(desc);
                        err = -EPROTO;
                }
        }

        *status = desc->status;
        return err;
}
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:        pointer to pending request
 * @status:     placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait here and
 * check for valid response status value.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err))
                DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

        *status = req->status;
        return err;
}
static int ctch_send(struct intel_guc_ct *ct,
                     struct intel_guc_ct_channel *ctch,
                     const u32 *action,
                     u32 len,
                     u32 *response_buf,
                     u32 response_buf_size,
                     u32 *status)
{
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
        struct guc_ct_buffer_desc *desc = ctb->desc;
        struct ct_request request;
        unsigned long flags;
        u32 fence;
        int err;

        GEM_BUG_ON(!ctch_is_open(ctch));
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);

        fence = ctch_get_next_fence(ctch);
        request.fence = fence;
        request.status = 0;
        request.response_len = response_buf_size;
        request.response_buf = response_buf;

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request.link, &ct->pending_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        err = ctb_write(ctb, action, len, fence, !!response_buf);
        if (unlikely(err))
                goto unlink;

        intel_guc_notify(ct_to_guc(ct));

        if (response_buf)
                err = wait_for_ct_request_update(&request, status);
        else
                err = wait_for_ctb_desc_update(desc, fence, status);
        if (unlikely(err))
                goto unlink;

        if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
                err = -EIO;
                goto unlink;
        }

        if (response_buf) {
                /* There shall be no data in the status */
                WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
                /* Return actual response len */
                err = request.response_len;
        } else {
                /* There shall be no response payload */
                WARN_ON(request.response_len);
                /* Return data decoded from the status dword */
                err = INTEL_GUC_MSG_TO_DATA(*status);
        }

unlink:
        spin_lock_irqsave(&ct->lock, flags);
        list_del(&request.link);
        spin_unlock_irqrestore(&ct->lock, flags);

        return err;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
                             u32 *response_buf, u32 response_buf_size)
{
        struct intel_guc_ct *ct = &guc->ct;
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        u32 status = ~0; /* undefined */
        int ret;

        mutex_lock(&guc->send_mutex);

        ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
                        &status);
        if (unlikely(ret < 0)) {
                DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
                          action[0], ret, status);
        } else if (unlikely(ret)) {
                CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
                                action[0], ret, ret);
        }

        mutex_unlock(&guc->send_mutex);
        return ret;
}
static inline unsigned int ct_header_get_len(u32 header)
{
        return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
        return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
        return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 *cmds = ctb->cmds;
        s32 available;                  /* in dwords */
        unsigned int len;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);
        GEM_BUG_ON(head >= size);

        /* tail == head condition indicates empty */
        available = tail - head;
        if (unlikely(available == 0))
                return -ENODATA;

        /* beware of buffer wrap case */
        if (unlikely(available < 0))
                available += size;
        CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
        GEM_BUG_ON(available < 0);

        data[0] = cmds[head];
        head = (head + 1) % size;

        /* message len with header */
        len = ct_header_get_len(data[0]) + 1;
        if (unlikely(len > (u32)available)) {
                DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
                          4, data,
                          4 * (head + available - 1 > size ?
                               size - head : available - 1), &cmds[head],
                          4 * (head + available - 1 > size ?
                               available - 1 - size + head : 0), &cmds[0]);
                return -EPROTO;
        }

        for (i = 1; i < len; i++) {
                data[i] = cmds[head];
                head = (head + 1) % size;
        }
        CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

        desc->head = head * 4;
        return 0;
}
/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
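/*
 * Illustrative sketch (not from the original sources): a minimal
 * successful response carries only fence and status, i.e. three dwords:
 *
 *      msg[0] = header with len = 2 and action = INTEL_GUC_ACTION_DEFAULT;
 *      msg[1] = fence;         (matches the fence of the original request)
 *      msg[2] = status;        (must satisfy INTEL_GUC_MSG_IS_RESPONSE())
 *
 * ct_handle_response() below rejects anything with len < 2 or a status
 * dword that does not decode as a RESPONSE message.
 */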
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        u32 fence;
        u32 status;
        u32 datalen;
        struct ct_request *req;
        bool found = false;

        GEM_BUG_ON(!ct_header_is_response(header));
        GEM_BUG_ON(!in_irq());

        /* Response payload shall at least include fence and status */
        if (unlikely(len < 2)) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        fence = msg[1];
        status = msg[2];
        datalen = len - 2;

        /* Format of the status follows RESPONSE message */
        if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

        spin_lock(&ct->lock);
        list_for_each_entry(req, &ct->pending_requests, link) {
                if (unlikely(fence != req->fence)) {
                        CT_DEBUG_DRIVER("CT: request %u awaits response\n",
                                        req->fence);
                        continue;
                }
                if (unlikely(datalen > req->response_len)) {
                        DRM_ERROR("CT: response %u too long %*ph\n",
                                  req->fence, 4 * msglen, msg);
                        datalen = 0;
                }
                if (datalen)
                        memcpy(req->response_buf, msg + 3, 4 * datalen);
                req->response_len = datalen;
                WRITE_ONCE(req->status, status);
                found = true;
                break;
        }
        spin_unlock(&ct->lock);

        if (!found)
                DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
        return 0;
}
static void ct_process_request(struct intel_guc_ct *ct,
                               u32 action, u32 len, const u32 *payload)
{
        struct intel_guc *guc = ct_to_guc(ct);

        CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

        switch (action) {
        case INTEL_GUC_ACTION_DEFAULT:
                if (unlikely(len < 1))
                        goto fail_unexpected;
                intel_guc_to_host_process_recv_msg(guc, *payload);
                break;

        default:
fail_unexpected:
                DRM_ERROR("CT: unexpected request %x %*ph\n",
                          action, 4 * len, payload);
                break;
        }
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
        unsigned long flags;
        struct ct_incoming_request *request;
        u32 header;
        u32 *payload;
        bool done;

        spin_lock_irqsave(&ct->lock, flags);
        request = list_first_entry_or_null(&ct->incoming_requests,
                                           struct ct_incoming_request, link);
        if (request)
                list_del(&request->link);
        done = !!list_empty(&ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        if (!request)
                return true;

        header = request->msg[0];
        payload = &request->msg[1];
        ct_process_request(ct,
                           ct_header_get_action(header),
                           ct_header_get_len(header),
                           payload);

        kfree(request);
        return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
        struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
        bool done;

        done = ct_process_incoming_requests(ct);
        if (!done)
                queue_work(system_unbound_wq, &ct->worker);
}
/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
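/*
 * Illustrative sketch (not from the original sources): unlike responses,
 * a GuC to Host request carries a real action code in its header, e.g.
 * for a single-dword notification payload:
 *
 *      msg[0] = header with len = 1 and action != INTEL_GUC_ACTION_DEFAULT;
 *      msg[1] = notification data;
 *
 * ct_handle_request() below copies the whole message (len + 1 dwords,
 * header included) and defers its processing to a worker.
 */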
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        struct ct_incoming_request *request;
        unsigned long flags;

        GEM_BUG_ON(ct_header_is_response(header));

        request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
        if (unlikely(!request)) {
                DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
                return 0; /* XXX: -ENOMEM ? */
        }
        memcpy(request->msg, msg, 4 * msglen);

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request->link, &ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        queue_work(system_unbound_wq, &ct->worker);
        return 0;
}
static void ct_process_host_channel(struct intel_guc_ct *ct)
{
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
        u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
        int err = 0;

        if (!ctch_is_open(ctch))
                return;

        do {
                err = ctb_read(ctb, msg);
                if (err)
                        break;

                if (ct_header_is_response(msg[0]))
                        err = ct_handle_response(ct, msg);
                else
                        err = ct_handle_request(ct, msg);
        } while (!err);

        if (GEM_WARN_ON(err == -EPROTO)) {
                DRM_ERROR("CT: corrupted message detected!\n");
                ctb->desc->is_in_error = 1;
        }
}
/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
        struct intel_guc_ct *ct = &guc->ct;

        ct_process_host_channel(ct);
}
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct drm_i915_private *i915 = guc_to_i915(guc);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        int err;

        GEM_BUG_ON(!HAS_GUC_CT(i915));

        err = ctch_open(guc, ctch);
        if (unlikely(err))
                return err;

        /* Switch into cmd transport buffer based send() */
        guc->send = intel_guc_send_ct;
        guc->handler = intel_guc_to_host_event_handler_ct;
        DRM_INFO("CT: %s\n", enableddisabled(true));
        return 0;
}
/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct drm_i915_private *i915 = guc_to_i915(guc);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;

        GEM_BUG_ON(!HAS_GUC_CT(i915));

        if (!ctch_is_open(ctch))
                return;

        ctch_close(guc, ctch);

        /* Disable send */
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
        DRM_INFO("CT: %s\n", enableddisabled(false));
}