linux-ginger.git: arch/powerpc/platforms/cell/spufs/hw_ops.c

/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"
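
/*
 * These are the "hardware" versions of the spu_context_ops callbacks:
 * they act directly on the problem-state and priv2 register areas of
 * the physical SPU the context is currently loaded on.  The saved-state
 * counterparts, which operate on the context save area instead, live in
 * backing_ops.c.
 *
 * mb_stat_R packs the three mailbox counts tested below: the low byte
 * (0x0000ff) is the number of words waiting in the SPU-to-PU mailbox,
 * the middle byte (0x00ff00) is the free space in the PU-to-SPU
 * mailbox, and the third byte (0xff0000) is the number of words in the
 * interrupting SPU-to-PU mailbox.
 */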
static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        u32 mbox_stat;
        int ret = 0;

        spin_lock_irq(&spu->register_lock);
        mbox_stat = in_be32(&prob->mb_stat_R);
        if (mbox_stat & 0x0000ff) {
                *data = in_be32(&prob->pu_mb_R);
                ret = 4;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->mb_stat_R);
}
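
/*
 * Poll backend for the mailbox files: if the requested direction is not
 * ready, the matching class 2 interrupt is unmasked so the interrupt
 * handler can wake up the waiter once the mailbox state changes.
 */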
static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
                                          unsigned int events)
{
        struct spu *spu = ctx->spu;
        int ret = 0;
        u32 stat;

        spin_lock_irq(&spu->register_lock);
        stat = in_be32(&spu->problem->mb_stat_R);

        /* if the requested event is there, return the poll
           mask, otherwise enable the interrupt to get notified,
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */

        if (events & (POLLIN | POLLRDNORM)) {
                if (stat & 0xff0000)
                        ret |= POLLIN | POLLRDNORM;
                else {
                        spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
                        spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                }
        }
        if (events & (POLLOUT | POLLWRNORM)) {
                if (stat & 0x00ff00)
                        ret = POLLOUT | POLLWRNORM;
                else {
                        spu_int_stat_clear(spu, 2,
                                        CLASS2_MAILBOX_THRESHOLD_INTR);
                        spu_int_mask_or(spu, 2,
                                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                }
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}
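
/*
 * The interrupting mailbox is read through the 64-bit puint_mb_R
 * register in the priv2 area; only the low 32 bits (one mailbox word)
 * are passed back to the caller.
 */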
static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0xff0000) {
                /* read the first available word */
                *data = in_be64(&priv2->puint_mb_R);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt */
                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
                /* we have space to write wbox_data to */
                out_be32(&prob->spu_mb_W, data);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt when space
                   becomes available */
                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}
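
/*
 * Signal notification registers are written through the problem state
 * area.  The *_type_set/get helpers toggle bits 0 and 1 of spu_cfg_RW,
 * which select whether an incoming signal overwrites or is OR-ed into
 * the corresponding notification register.
 */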
static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify2, data);
}

static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 1;
        else
                tmp &= ~1;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 2;
        else
                tmp &= ~2;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}
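
/*
 * Simple pass-through accessors for the next program counter, status,
 * run control and privileged control registers, and for the kernel
 * mapping of the local store.
 */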
static u32 spu_hw_npc_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
        out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
        return ctx->spu->local_store;
}

static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
{
        out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}
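
/*
 * Starting an isolated SPU application requires the load-request enable
 * bit in the privileged control register to be set before ISOLATE is
 * written to run control; stopping busy-waits until the status register
 * no longer reports the SPU as running.
 */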
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
        spin_lock_irq(&ctx->spu->register_lock);
        if (val & SPU_RUNCNTL_ISOLATE)
                spu_hw_privcntl_write(ctx,
                        SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
        spin_unlock_irq(&ctx->spu->register_lock);
}

static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
        while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
                cpu_relax();
        spin_unlock_irq(&ctx->spu->register_lock);
}
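
/*
 * The master run control bit in MFC state register 1 (SR1) gates SPU
 * execution independently of the problem-state run control register.
 */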
static void spu_hw_master_start(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}
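
/*
 * A proxy tag-group query can only be set up when no other query is
 * pending, so -EAGAIN is returned while dma_querytype_RW is still
 * non-zero.
 */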
static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
{
        struct spu_problem __iomem *prob = ctx->spu->problem;
        int ret;

        spin_lock_irq(&ctx->spu->register_lock);
        ret = -EAGAIN;
        if (in_be32(&prob->dma_querytype_RW))
                goto out;
        ret = 0;
        out_be32(&prob->dma_querymask_RW, mask);
        out_be32(&prob->dma_querytype_RW, mode);
out:
        spin_unlock_irq(&ctx->spu->register_lock);
        return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context * ctx)
{
        return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_qstatus_R);
}
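
/*
 * An MFC command is issued through the 32-bit halves of the proxy
 * command queue registers; the low 16 bits of the returned command
 * status decide the result: 0 means accepted, 2 means the queue is
 * full (-EAGAIN), anything else is treated as an invalid command
 * (-EINVAL).
 */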
static int spu_hw_send_mfc_command(struct spu_context *ctx,
                                   struct mfc_dma_command *cmd)
{
        u32 status;
        struct spu_problem __iomem *prob = ctx->spu->problem;

        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&prob->mfc_lsa_W, cmd->lsa);
        out_be64(&prob->mfc_ea_W, cmd->ea);
        out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
                 cmd->size << 16 | cmd->tag);
        out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
                 cmd->class << 16 | cmd->cmd);
        status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
        spin_unlock_irq(&ctx->spu->register_lock);

        switch (status & 0xffff) {
        case 0:
                return 0;
        case 2:
                return -EAGAIN;
        default:
                return -EINVAL;
        }
}

static void spu_hw_restart_dma(struct spu_context *ctx)
{
        struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
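
/*
 * spu_hw_ops is the spu_context_ops implementation used while a context
 * is bound to a physical SPU; spufs switches a context between this
 * table and the backing-store implementation as it is scheduled in and
 * out.
 */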
struct spu_context_ops spu_hw_ops = {
        .mbox_read = spu_hw_mbox_read,
        .mbox_stat_read = spu_hw_mbox_stat_read,
        .mbox_stat_poll = spu_hw_mbox_stat_poll,
        .ibox_read = spu_hw_ibox_read,
        .wbox_write = spu_hw_wbox_write,
        .signal1_write = spu_hw_signal1_write,
        .signal2_write = spu_hw_signal2_write,
        .signal1_type_set = spu_hw_signal1_type_set,
        .signal1_type_get = spu_hw_signal1_type_get,
        .signal2_type_set = spu_hw_signal2_type_set,
        .signal2_type_get = spu_hw_signal2_type_get,
        .npc_read = spu_hw_npc_read,
        .npc_write = spu_hw_npc_write,
        .status_read = spu_hw_status_read,
        .get_ls = spu_hw_get_ls,
        .privcntl_write = spu_hw_privcntl_write,
        .runcntl_read = spu_hw_runcntl_read,
        .runcntl_write = spu_hw_runcntl_write,
        .runcntl_stop = spu_hw_runcntl_stop,
        .master_start = spu_hw_master_start,
        .master_stop = spu_hw_master_stop,
        .set_mfc_query = spu_hw_set_mfc_query,
        .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
        .send_mfc_command = spu_hw_send_mfc_command,
        .restart_dma = spu_hw_restart_dma,
};