drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);
/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size)
{
	CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}
static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		CTB_OWNER_HOST,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}
/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ct->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
	if (err) {
		DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
		return err;
	}

	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ct->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;
}
/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base;
	int err;
	int i;

	GEM_BUG_ON(ct->enabled);

	/* vma should be already allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	DRM_ERROR("CT: can't open channel; err=%d\n", err);
	return err;
}
/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_running(guc)) {
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_SEND);
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_RECV);
	}
}
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.next_fence;
}
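/*
 * Note: the fence is a plain incrementing u32; ct_handle_response() later
 * uses it to match a GuC response back to the originating request on the
 * requests.pending list.
 */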
/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                  |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}
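/*
 * Note: ctb_write() only copies the message into the ring and advances the
 * descriptor tail; it does not signal the firmware. The caller (ct_send())
 * follows up with intel_guc_notify() so the GuC knows new data is available
 * in the SEND buffer.
 */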
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update the CT buffer descriptor with the new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the status of the command from the
 * descriptor.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * the response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}
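/*
 * Note on the two wait paths used by ct_send() below: when the caller
 * provides a response buffer, we wait for the RECV-side handler to update
 * request->status (wait_for_ct_request_update()); otherwise we only wait
 * for the GuC to write our fence and status back into the SEND descriptor
 * (wait_for_ctb_desc_update()).
 */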
static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}
/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	mutex_lock(&guc->send_mutex);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}
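/*
 * Note: ctb_read() copies exactly one complete message (header plus payload)
 * into the caller's buffer and only then writes the new head back to the
 * descriptor, so a message is never left half-consumed from the GuC's point
 * of view.
 */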
/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                            |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->requests.lock);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->requests.lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}
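/*
 * Note: ct_handle_response() runs from the interrupt path (see the
 * GEM_BUG_ON(!in_irq()) above) and walks the pending list under
 * requests.lock; the waiter in ct_send() observes the result through the
 * WRITE_ONCE()/READ_ONCE() pair on request->status.
 */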
static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}
static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}
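/*
 * Note: the worker dequeues a single incoming request per invocation and
 * re-queues itself while more requests remain, instead of draining the
 * whole incoming list in one pass.
 */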
/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                            |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}
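/*
 * Note: ct_handle_request() is reached from the interrupt path, hence the
 * GFP_ATOMIC allocation; the actual processing of the request is deferred
 * to the worker above via system_unbound_wq.
 */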
/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}