drivers/gpu/drm/i915/intel_guc_ct.c
/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)    DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)    do { } while (0)
#endif
struct ct_request {
        struct list_head link;
        u32 fence;
        u32 status;
        u32 response_len;
        u32 *response_buf;
};

struct ct_incoming_request {
        struct list_head link;
        u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };
static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
        /* we're using static channel owners */
        ct->host_channel.owner = CTB_OWNER_HOST;

        spin_lock_init(&ct->lock);
        INIT_LIST_HEAD(&ct->pending_requests);
        INIT_LIST_HEAD(&ct->incoming_requests);
        INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
        return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
        switch (type) {
        case INTEL_GUC_CT_BUFFER_TYPE_SEND:
                return "SEND";
        case INTEL_GUC_CT_BUFFER_TYPE_RECV:
                return "RECV";
        default:
                return "<invalid>";
        }
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
                                    u32 cmds_addr, u32 size, u32 owner)
{
        CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
                        desc, cmds_addr, size, owner);
        memset(desc, 0, sizeof(*desc));
        desc->addr = cmds_addr;
        desc->size = size;
        desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
        CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
                        desc, desc->head, desc->tail);
        desc->head = 0;
        desc->tail = 0;
        desc->is_in_error = 0;
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
                                         u32 desc_addr,
                                         u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
                desc_addr,
                sizeof(struct guc_ct_buffer_desc),
                type
        };
        int err;

        /* Can't use generic send(), CT registration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: register %s buffer failed; err=%d\n",
                          guc_ct_buffer_type_to_str(type), err);
        return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
                                           u32 owner,
                                           u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
                owner,
                type
        };
        int err;

        /* Can't use generic send(), CT deregistration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
                          guc_ct_buffer_type_to_str(type), owner, err);
        return err;
}
static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
        return ctch->vma != NULL;
}

static int ctch_init(struct intel_guc *guc,
                     struct intel_guc_ct_channel *ctch)
{
        struct i915_vma *vma;
        void *blob;
        int err;
        int i;

        GEM_BUG_ON(ctch->vma);
        /* We allocate 1 page to hold both descriptors and both buffers.
         *       ___________.....................
         *      |desc (SEND)|                   :
         *      |___________|                   PAGE/4
         *      :___________....................:
         *      |desc (RECV)|                   :
         *      |___________|                   PAGE/4
         *      :_______________________________:
         *      |cmds (SEND)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *      |cmds (RECV)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *
         * Each message can use a maximum of 32 dwords and we don't expect to
         * have more than 1 in flight at any time, so we have enough space.
         * Some logic further ahead will rely on the fact that there is only 1
         * page and that it is always mapped, so if the size is changed the
         * other code will need updating as well.
         */
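        /*
         * Illustrative note (not part of the original source): with the
         * single 4 KiB page assumed above, the pointer setup further below
         * (blob + PAGE_SIZE/4 * i for descriptors, plus PAGE_SIZE/2 for the
         * command buffers) yields these blob offsets:
         *   desc (SEND) at 0,    desc (RECV) at 1024,
         *   cmds (SEND) at 2048, cmds (RECV) at 3072.
         */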
        /* allocate vma */
        vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_out;
        }
        ctch->vma = vma;

        /* map first page */
        blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(blob)) {
                err = PTR_ERR(blob);
                goto err_vma;
        }
        CT_DEBUG_DRIVER("CT: vma base=%#x\n",
                        intel_guc_ggtt_offset(guc, ctch->vma));

        /* store pointers to desc and cmds */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
                ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
        }

        return 0;

err_vma:
        i915_vma_unpin_and_release(&ctch->vma);
err_out:
        CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
                        ctch->owner, err);
        return err;
}
static void ctch_fini(struct intel_guc *guc,
                      struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(!ctch->vma);

        i915_gem_object_unpin_map(ctch->vma->obj);
        i915_vma_unpin_and_release(&ctch->vma);
}
static int ctch_open(struct intel_guc *guc,
                     struct intel_guc_ct_channel *ctch)
{
        u32 base;
        int err;
        int i;

        CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
                        ctch->owner, yesno(ctch_is_open(ctch)));

        if (!ctch->vma) {
                err = ctch_init(guc, ctch);
                if (unlikely(err))
                        goto err_out;
                GEM_BUG_ON(!ctch->vma);
        }
        /* vma should be already allocated and mapped */
        base = intel_guc_ggtt_offset(guc, ctch->vma);
        /* (re)initialize descriptors
         * cmds buffers are in the second half of the blob page
         */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
                                        base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
                                        PAGE_SIZE/4,
                                        ctch->owner);
        }
        /* register buffers, starting with RECV buffer
         * descriptors are in first half of the blob
         */
        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_RECV,
                                            INTEL_GUC_CT_BUFFER_TYPE_RECV);
        if (unlikely(err))
                goto err_fini;

        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_SEND,
                                            INTEL_GUC_CT_BUFFER_TYPE_SEND);
        if (unlikely(err))
                goto err_deregister;

        return 0;

err_deregister:
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
        ctch_fini(guc, ctch);
err_out:
        DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
        return err;
}
static void ctch_close(struct intel_guc *guc,
                       struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(!ctch_is_open(ctch));

        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_SEND);
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
        ctch_fini(guc, ctch);
}

static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
        /* For now it's trivial */
        return ++ctch->next_fence;
}
/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
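/*
 * Illustrative example (not part of the original source): a request with
 * hypothetical action 0x123, a single payload dword 0xabcd and fence 7
 * occupies three dwords in the send buffer, written by ctb_write() below as:
 *
 *   msg[0] = (2 << GUC_CT_MSG_LEN_SHIFT) | GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *            (0x123 << GUC_CT_MSG_ACTION_SHIFT)
 *            (header; len counts the FENCE plus payload dwords, and
 *             GUC_CT_MSG_SEND_STATUS would also be set if a response
 *             buffer was supplied)
 *   msg[1] = 7        (FENCE)
 *   msg[2] = 0xabcd   (request specific data)
 */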
static int ctb_write(struct intel_guc_ct_buffer *ctb,
                     const u32 *action,
                     u32 len /* in dwords */,
                     u32 fence,
                     bool want_response)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 used;                       /* in dwords */
        u32 header;
        u32 *cmds = ctb->cmds;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);

        /*
         * tail == head condition indicates empty. GuC FW does not support
         * using up the entire buffer to get tail == head meaning full.
         */
        if (tail < head)
                used = (size - head) + tail;
        else
                used = tail - head;
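        /*
         * Worked example (illustrative, not from the original source):
         * with size = 256 dwords, head = 250 and tail = 10 the buffer has
         * wrapped, so used = (256 - 250) + 10 = 16 dwords; the check below
         * then rejects any message unless used + len + 1 < size.
         */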
        /* make sure there is a space including extra dw for the fence */
        if (unlikely(used + len + 1 >= size))
                return -ENOSPC;

        /*
         * Write the message. The format is the following:
         * DW0: header (including action code)
         * DW1: fence
         * DW2+: action data
         */
        header = (len << GUC_CT_MSG_LEN_SHIFT) |
                 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
                 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
                 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

        CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
                        4, &header, 4, &fence,
                        4 * (len - 1), &action[1]);

        cmds[tail] = header;
        tail = (tail + 1) % size;

        cmds[tail] = fence;
        tail = (tail + 1) % size;

        for (i = 1; i < len; i++) {
                cmds[tail] = action[i];
                tail = (tail + 1) % size;
        }

        /* now update desc tail (back in bytes) */
        desc->tail = tail * 4;
        GEM_BUG_ON(desc->tail > desc->size);

        return 0;
}
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:       buffer descriptor
 * @fence:      response fence
 * @status:     placeholder for status
 *
 * GuC will update the CT buffer descriptor with the new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the command's status from the descriptor.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 * *    -EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
                                    u32 fence,
                                    u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done (READ_ONCE(desc->fence) == fence)
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err)) {
                DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
                          fence, desc->fence);

                if (WARN_ON(desc->is_in_error)) {
                        /* Something went wrong with the messaging, try to
                         * reset the buffer and hope for the best
                         */
                        guc_ct_buffer_desc_reset(desc);
                        err = -EPROTO;
                }
        }

        *status = desc->status;
        return err;
}
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:        pointer to pending request
 * @status:     placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err))
                DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

        *status = req->status;
        return err;
}
static int ctch_send(struct intel_guc_ct *ct,
                     struct intel_guc_ct_channel *ctch,
                     const u32 *action,
                     u32 len,
                     u32 *response_buf,
                     u32 response_buf_size,
                     u32 *status)
{
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
        struct guc_ct_buffer_desc *desc = ctb->desc;
        struct ct_request request;
        unsigned long flags;
        u32 fence;
        int err;

        GEM_BUG_ON(!ctch_is_open(ctch));
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);

        fence = ctch_get_next_fence(ctch);
        request.fence = fence;
        request.status = 0;
        request.response_len = response_buf_size;
        request.response_buf = response_buf;

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request.link, &ct->pending_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        err = ctb_write(ctb, action, len, fence, !!response_buf);
        if (unlikely(err))
                goto unlink;

        intel_guc_notify(ct_to_guc(ct));

        if (response_buf)
                err = wait_for_ct_request_update(&request, status);
        else
                err = wait_for_ctb_desc_update(desc, fence, status);
        if (unlikely(err))
                goto unlink;

        if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
                err = -EIO;
                goto unlink;
        }

        if (response_buf) {
                /* There shall be no data in the status */
                WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
                /* Return actual response len */
                err = request.response_len;
        } else {
                /* There shall be no response payload */
                WARN_ON(request.response_len);
                /* Return data decoded from the status dword */
                err = INTEL_GUC_MSG_TO_DATA(*status);
        }

unlink:
        spin_lock_irqsave(&ct->lock, flags);
        list_del(&request.link);
        spin_unlock_irqrestore(&ct->lock, flags);

        return err;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
                             u32 *response_buf, u32 response_buf_size)
{
        struct intel_guc_ct *ct = &guc->ct;
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        u32 status = ~0; /* undefined */
        int ret;

        mutex_lock(&guc->send_mutex);

        ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
                        &status);
        if (unlikely(ret < 0)) {
                DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
                          action[0], ret, status);
        } else if (unlikely(ret)) {
                CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
                                action[0], ret, ret);
        }

        mutex_unlock(&guc->send_mutex);
        return ret;
}
static inline unsigned int ct_header_get_len(u32 header)
{
        return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
        return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
        return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 *cmds = ctb->cmds;
        s32 available;                  /* in dwords */
        unsigned int len;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);
        GEM_BUG_ON(head >= size);

        /* tail == head condition indicates empty */
        available = tail - head;
        if (unlikely(available == 0))
                return -ENODATA;

        /* beware of buffer wrap case */
        if (unlikely(available < 0))
                available += size;
        CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
        GEM_BUG_ON(available < 0);

        data[0] = cmds[head];
        head = (head + 1) % size;

        /* message len with header */
        len = ct_header_get_len(data[0]) + 1;
        if (unlikely(len > (u32)available)) {
                DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
                          4, data,
                          4 * (head + available - 1 > size ?
                               size - head : available - 1), &cmds[head],
                          4 * (head + available - 1 > size ?
                               available - 1 - size + head : 0), &cmds[0]);
                return -EPROTO;
        }

        for (i = 1; i < len; i++) {
                data[i] = cmds[head];
                head = (head + 1) % size;
        }
        CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

        desc->head = head * 4;
        return 0;
}
/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
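/*
 * Illustrative example (not part of the original source): a minimal
 * 3-dword response to the earlier request sketch could look like:
 *
 *   msg[0] = header with len = 2 and action = INTEL_GUC_ACTION_DEFAULT
 *   msg[1] = 7          (FENCE, matching the request)
 *   msg[2] = status     (must satisfy INTEL_GUC_MSG_IS_RESPONSE())
 *
 * ct_handle_response() below uses the fence to find the matching entry
 * on ct->pending_requests and copies any trailing dwords (none here,
 * since datalen = len - 2 = 0) into the caller's response buffer.
 */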
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        u32 fence;
        u32 status;
        u32 datalen;
        struct ct_request *req;
        bool found = false;

        GEM_BUG_ON(!ct_header_is_response(header));
        GEM_BUG_ON(!in_irq());

        /* Response payload shall at least include fence and status */
        if (unlikely(len < 2)) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        fence = msg[1];
        status = msg[2];
        datalen = len - 2;

        /* Format of the status follows RESPONSE message */
        if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

        spin_lock(&ct->lock);
        list_for_each_entry(req, &ct->pending_requests, link) {
                if (unlikely(fence != req->fence)) {
                        CT_DEBUG_DRIVER("CT: request %u awaits response\n",
                                        req->fence);
                        continue;
                }
                if (unlikely(datalen > req->response_len)) {
                        DRM_ERROR("CT: response %u too long %*ph\n",
                                  req->fence, 4 * msglen, msg);
                        datalen = 0;
                }
                if (datalen)
                        memcpy(req->response_buf, msg + 3, 4 * datalen);
                req->response_len = datalen;
                WRITE_ONCE(req->status, status);
                found = true;
                break;
        }
        spin_unlock(&ct->lock);

        if (!found)
                DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
        return 0;
}
static void ct_process_request(struct intel_guc_ct *ct,
                               u32 action, u32 len, const u32 *payload)
{
        struct intel_guc *guc = ct_to_guc(ct);

        CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

        switch (action) {
        case INTEL_GUC_ACTION_DEFAULT:
                if (unlikely(len < 1))
                        goto fail_unexpected;
                intel_guc_to_host_process_recv_msg(guc, *payload);
                break;

        default:
fail_unexpected:
                DRM_ERROR("CT: unexpected request %x %*ph\n",
                          action, 4 * len, payload);
                break;
        }
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
        unsigned long flags;
        struct ct_incoming_request *request;
        u32 header;
        u32 *payload;
        bool done;

        spin_lock_irqsave(&ct->lock, flags);
        request = list_first_entry_or_null(&ct->incoming_requests,
                                           struct ct_incoming_request, link);
        if (request)
                list_del(&request->link);
        done = !!list_empty(&ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        if (!request)
                return true;

        header = request->msg[0];
        payload = &request->msg[1];
        ct_process_request(ct,
                           ct_header_get_action(header),
                           ct_header_get_len(header),
                           payload);

        kfree(request);
        return done;
}
static void ct_incoming_request_worker_func(struct work_struct *w)
{
        struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
        bool done;

        done = ct_process_incoming_requests(ct);
        if (!done)
                queue_work(system_unbound_wq, &ct->worker);
}
/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
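/*
 * Illustrative note (not part of the original source): unlike a response,
 * a GuC to Host request carries no FENCE or STATUS dwords; the header's
 * len field covers only the request specific data. A header with len = 2
 * therefore describes a 3-dword message (header plus two payload dwords),
 * which matches the msglen = len + 1 used by ct_handle_request() below.
 */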
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        struct ct_incoming_request *request;
        unsigned long flags;

        GEM_BUG_ON(ct_header_is_response(header));

        request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
        if (unlikely(!request)) {
                DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
                return 0; /* XXX: -ENOMEM ? */
        }
        memcpy(request->msg, msg, 4 * msglen);

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request->link, &ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        queue_work(system_unbound_wq, &ct->worker);
        return 0;
}
static void ct_process_host_channel(struct intel_guc_ct *ct)
{
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
        u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
        int err = 0;

        if (!ctch_is_open(ctch))
                return;

        do {
                err = ctb_read(ctb, msg);
                if (err)
                        break;

                if (ct_header_is_response(msg[0]))
                        err = ct_handle_response(ct, msg);
                else
                        err = ct_handle_request(ct, msg);
        } while (!err);

        if (GEM_WARN_ON(err == -EPROTO)) {
                DRM_ERROR("CT: corrupted message detected!\n");
                ctb->desc->is_in_error = 1;
        }
}
/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
        struct intel_guc_ct *ct = &guc->ct;

        ct_process_host_channel(ct);
}
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct drm_i915_private *i915 = guc_to_i915(guc);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        int err;

        GEM_BUG_ON(!HAS_GUC_CT(i915));

        err = ctch_open(guc, ctch);
        if (unlikely(err))
                return err;

        /* Switch into cmd transport buffer based send() */
        guc->send = intel_guc_send_ct;
        guc->handler = intel_guc_to_host_event_handler_ct;
        DRM_INFO("CT: %s\n", enableddisabled(true));
        return 0;
}
/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Shall only be called for platforms with HAS_GUC_CT.
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct drm_i915_private *i915 = guc_to_i915(guc);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;

        GEM_BUG_ON(!HAS_GUC_CT(i915));

        if (!ctch_is_open(ctch))
                return;

        ctch_close(guc, ctch);

        /* Disable send */
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
        DRM_INFO("CT: %s\n", enableddisabled(false));
}