/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq
*cmdq
, u32 size
, bool *rewind
)
28 u32 head
= nvkm_falcon_rd32(cmdq
->qmgr
->falcon
, cmdq
->head_reg
);
29 u32 tail
= nvkm_falcon_rd32(cmdq
->qmgr
->falcon
, cmdq
->tail_reg
);
32 size
= ALIGN(size
, QUEUE_ALIGNMENT
);
35 free
= cmdq
->offset
+ cmdq
->size
- head
;
45 free
= tail
- head
- 1;
51 nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq
*cmdq
, void *data
, u32 size
)
53 struct nvkm_falcon
*falcon
= cmdq
->qmgr
->falcon
;
54 nvkm_falcon_load_dmem(falcon
, data
, cmdq
->position
, size
, 0);
55 cmdq
->position
+= ALIGN(size
, QUEUE_ALIGNMENT
);
59 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq
*cmdq
)
61 struct nvfw_falcon_cmd cmd
;
63 cmd
.unit_id
= NV_FALCON_CMD_UNIT_ID_REWIND
;
64 cmd
.size
= sizeof(cmd
);
65 nvkm_falcon_cmdq_push(cmdq
, &cmd
, cmd
.size
);
67 cmdq
->position
= cmdq
->offset
;
71 nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq
*cmdq
, u32 size
)
73 struct nvkm_falcon
*falcon
= cmdq
->qmgr
->falcon
;
76 mutex_lock(&cmdq
->mutex
);
78 if (!nvkm_falcon_cmdq_has_room(cmdq
, size
, &rewind
)) {
79 FLCNQ_DBG(cmdq
, "queue full");
80 mutex_unlock(&cmdq
->mutex
);
84 cmdq
->position
= nvkm_falcon_rd32(falcon
, cmdq
->head_reg
);
87 nvkm_falcon_cmdq_rewind(cmdq
);
93 nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq
*cmdq
)
95 nvkm_falcon_wr32(cmdq
->qmgr
->falcon
, cmdq
->head_reg
, cmdq
->position
);
96 mutex_unlock(&cmdq
->mutex
);
100 nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq
*cmdq
, struct nvfw_falcon_cmd
*cmd
)
102 static unsigned timeout
= 2000;
103 unsigned long end_jiffies
= jiffies
+ msecs_to_jiffies(timeout
);
106 while (ret
== -EAGAIN
&& time_before(jiffies
, end_jiffies
))
107 ret
= nvkm_falcon_cmdq_open(cmdq
, cmd
->size
);
109 FLCNQ_ERR(cmdq
, "timeout waiting for queue space");
113 nvkm_falcon_cmdq_push(cmdq
, cmd
, cmd
->size
);
114 nvkm_falcon_cmdq_close(cmdq
);
/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)
124 nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq
*cmdq
, struct nvfw_falcon_cmd
*cmd
,
125 nvkm_falcon_qmgr_callback cb
, void *priv
,
126 unsigned long timeout
)
128 struct nvkm_falcon_qmgr_seq
*seq
;
131 if (!wait_for_completion_timeout(&cmdq
->ready
,
132 msecs_to_jiffies(1000))) {
133 FLCNQ_ERR(cmdq
, "timeout waiting for queue ready");
137 seq
= nvkm_falcon_qmgr_seq_acquire(cmdq
->qmgr
);
141 cmd
->seq_id
= seq
->id
;
142 cmd
->ctrl_flags
= CMD_FLAGS_STATUS
| CMD_FLAGS_INTR
;
144 seq
->state
= SEQ_STATE_USED
;
145 seq
->async
= !timeout
;
149 ret
= nvkm_falcon_cmdq_write(cmdq
, cmd
);
151 seq
->state
= SEQ_STATE_PENDING
;
152 nvkm_falcon_qmgr_seq_release(cmdq
->qmgr
, seq
);
157 if (!wait_for_completion_timeout(&seq
->done
, timeout
)) {
158 FLCNQ_ERR(cmdq
, "timeout waiting for reply");
162 nvkm_falcon_qmgr_seq_release(cmdq
->qmgr
, seq
);
169 nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq
*cmdq
)
171 reinit_completion(&cmdq
->ready
);
175 nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq
*cmdq
,
176 u32 index
, u32 offset
, u32 size
)
178 const struct nvkm_falcon_func
*func
= cmdq
->qmgr
->falcon
->func
;
180 cmdq
->head_reg
= func
->cmdq
.head
+ index
* func
->cmdq
.stride
;
181 cmdq
->tail_reg
= func
->cmdq
.tail
+ index
* func
->cmdq
.stride
;
182 cmdq
->offset
= offset
;
184 complete_all(&cmdq
->ready
);
186 FLCNQ_DBG(cmdq
, "initialised @ index %d offset 0x%08x size 0x%08x",
187 index
, cmdq
->offset
, cmdq
->size
);
191 nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq
**pcmdq
)
193 struct nvkm_falcon_cmdq
*cmdq
= *pcmdq
;
201 nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr
*qmgr
, const char *name
,
202 struct nvkm_falcon_cmdq
**pcmdq
)
204 struct nvkm_falcon_cmdq
*cmdq
= *pcmdq
;
206 if (!(cmdq
= *pcmdq
= kzalloc(sizeof(*cmdq
), GFP_KERNEL
)))
211 mutex_init(&cmdq
->mutex
);
212 init_completion(&cmdq
->ready
);