/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>
#include <subdev/top.h>

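/*
 * Upload code into the falcon's instruction memory (IMEM) through the
 * implementation-specific load_imem hook.  Secure-tagged uploads are only
 * honoured on falcons that advertise a security model (falcon->secret).
 */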
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u16 tag, u8 port, bool secure)
{
	if (secure && !falcon->secret) {
		nvkm_warn(falcon->user,
			  "writing with secure tag on a non-secure falcon!\n");
		return;
	}

	falcon->func->load_imem(falcon, data, start, size, tag, port,
				secure);
}

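/*
 * The two data-memory (DMEM) accessors below are serialized with dmem_mutex
 * so that concurrent users cannot interleave their transfers.
 */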
void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u8 port)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->load_dmem(falcon, data, start, size, port);

	mutex_unlock(&falcon->dmem_mutex);
}

void
nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
		      void *data)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->read_dmem(falcon, start, size, port, data);

	mutex_unlock(&falcon->dmem_mutex);
}

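/* Bind an instance block to the falcon, if the implementation supports it. */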
void
nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
{
	if (!falcon->func->bind_context) {
		nvkm_error(falcon->user,
			   "Context binding not supported on this falcon!\n");
		return;
	}

	falcon->func->bind_context(falcon, inst);
}

void
nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	falcon->func->set_start_addr(falcon, start_addr);
}

void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
	falcon->func->start(falcon);
}

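/*
 * Enable the falcon at the master control (MC) level, then run the
 * implementation-specific enable hook.  The MC enable is rolled back if
 * the hook fails.
 */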
int
nvkm_falcon_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	enum nvkm_devidx id = falcon->owner->index;
	int ret;

	nvkm_mc_enable(device, id);
	ret = falcon->func->enable(falcon);
	if (ret) {
		nvkm_mc_disable(device, id);
		return ret;
	}

	return 0;
}

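/* Disable the falcon, then gate it again at the MC level. */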
void
nvkm_falcon_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	enum nvkm_devidx id = falcon->owner->index;

	/* already disabled, return or wait_idle will timeout */
	if (!nvkm_mc_enabled(device, id))
		return;

	falcon->func->disable(falcon);

	nvkm_mc_disable(device, id);
}

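/* Reset the falcon by running a full disable/enable cycle. */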
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	nvkm_falcon_disable(falcon);
	return nvkm_falcon_enable(falcon);
}

int
nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	return falcon->func->wait_for_halt(falcon, ms);
}

int
nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	return falcon->func->clear_interrupt(falcon, mask);
}

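/*
 * One-time probing of the falcon: locate its register base via the TOP
 * tables when no address was given at construction time, then read the
 * hardware config registers to determine core version, security model,
 * IMEM/DMEM port counts and memory limits.
 */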
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	if (func->debug) {
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	/* only probe the hardware once */
	falcon->oneinit = true;

	return 0;
}

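/* Release the falcon if, and only if, user is the current owner. */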
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	if (unlikely(!falcon))
		return;

	mutex_lock(&falcon->mutex);
	if (falcon->user == user) {
		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
		falcon->user = NULL;
	}
	mutex_unlock(&falcon->mutex);
}

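/*
 * Acquire exclusive use of the falcon on behalf of user.  Only one subdev
 * may own a falcon at a time; -EBUSY is returned otherwise.  The first
 * successful acquisition triggers one-time hardware probing.
 */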
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	int ret = 0;

	mutex_lock(&falcon->mutex);
	if (falcon->user) {
		nvkm_error(user, "%s falcon already acquired by %s!\n",
			   falcon->name, nvkm_subdev_name[falcon->user->index]);
		mutex_unlock(&falcon->mutex);
		return -EBUSY;
	}

	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
	if (!falcon->oneinit)
		ret = nvkm_falcon_oneinit(falcon);
	falcon->user = user;
	mutex_unlock(&falcon->mutex);
	return ret;
}

void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}

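/* Initialize the software state of a falcon instance; no hardware access. */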
int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
		 struct nvkm_subdev *subdev, const char *name, u32 addr,
		 struct nvkm_falcon *falcon)
{
	falcon->func = func;
	falcon->owner = subdev;
	falcon->name = name;
	falcon->addr = addr;
	mutex_init(&falcon->mutex);
	mutex_init(&falcon->dmem_mutex);
	return 0;
}

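/* Tear down and free a dynamically allocated falcon, clearing the caller's pointer. */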
void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
	if (*pfalcon) {
		nvkm_falcon_dtor(*pfalcon);
		kfree(*pfalcon);
		*pfalcon = NULL;
	}
}