drivers/net/wimax/i2400m/sdio-tx.c
/*
 * Intel Wireless WiMAX Connection 2400m
 * SDIO TX transaction backends
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Dirk Brandewie <dirk.j.brandewie@intel.com>
 *  - Initial implementation
 *
 *
 * Takes the TX messages in the i2400m's driver TX FIFO and sends them
 * to the device until there are no more.
 *
 * If we fail sending the message, we just drop it. There isn't much
 * we can do at this point. Most of the traffic is network, which has
 * recovery methods for dropped packets.
 *
 * The SDIO functions are not atomic, so we can't run from the context
 * where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
 * (sometimes atomic). Thus, the actual TX work is deferred to a
 * workqueue.
 *
 * ROADMAP
 *
 * i2400ms_bus_tx_kick()
 *   i2400ms_tx_submit()     [through workqueue]
 *
 * i2400ms_tx_setup()
 *
 * i2400ms_tx_release()
 */
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"

#define D_SUBMODULE tx
#include "sdio-debug-levels.h"
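
/*
 * Note: d_fnstart(), d_fnend(), d_printf() and d_dump() used below come
 * from the i2400m driver's debug framework; the D_SUBMODULE definition
 * above selects the per-submodule debug level (declared in
 * sdio-debug-levels.h) that gates them for this file.
 */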
/*
 * Pull TX transactions from the TX FIFO and send them to the device
 * until there are no more.
 */
static
void i2400ms_tx_submit(struct work_struct *ws)
{
	int result;
	struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
	struct i2400m *i2400m = &i2400ms->i2400m;
	struct sdio_func *func = i2400ms->func;
	struct device *dev = &func->dev;
	struct i2400m_msg_hdr *tx_msg;
	size_t tx_msg_size;

	d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400m);

	while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
		d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
		d_dump(5, dev, tx_msg, tx_msg_size);

		sdio_claim_host(func);
		result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
		sdio_release_host(func);

		i2400m_tx_msg_sent(i2400m);

		if (result < 0) {
			dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
				" %d\n", (void *) tx_msg - i2400m->tx_buf,
				tx_msg_size, result);
		}

		/* A timed-out transfer usually means the device is no
		 * longer responding; trigger the driver's error recovery
		 * and stop draining the TX FIFO. */
		if (result == -ETIMEDOUT) {
			i2400m_error_recovery(i2400m);
			break;
		}
		d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
	}

	d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
}
/*
 * The generic driver notifies us that there is data ready for TX
 *
 * Schedule a run of i2400ms_tx_submit() to handle it.
 */
void i2400ms_bus_tx_kick(struct i2400m *i2400m)
{
	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
	struct device *dev = &i2400ms->func->dev;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);

	/* Schedule the TX work: TX may block, so it has to run in
	 * thread (workqueue) context. */
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (i2400ms->tx_workqueue != NULL)
		queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
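
/*
 * Allocate the deferred TX resources: initialize the TX work item and
 * create the single-threaded workqueue it runs on.
 *
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */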
int i2400ms_tx_setup(struct i2400ms *i2400ms)
{
	int result;
	struct device *dev = &i2400ms->func->dev;
	struct i2400m *i2400m = &i2400ms->i2400m;
	struct workqueue_struct *tx_workqueue;
	unsigned long flags;

	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);

	INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
	snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
		 "%s-tx", i2400m->wimax_dev.name);
	tx_workqueue =
		create_singlethread_workqueue(i2400ms->tx_wq_name);
	if (tx_workqueue == NULL) {
		dev_err(dev, "TX: failed to create workqueue\n");
		result = -ENOMEM;
	} else
		result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	i2400ms->tx_workqueue = tx_workqueue;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
	return result;
}
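
/*
 * Tear down the deferred TX resources.
 *
 * Clear i2400ms->tx_workqueue under tx_lock first so that
 * i2400ms_bus_tx_kick() can no longer queue new work, then destroy the
 * workqueue, which drains any work still pending.
 */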
void i2400ms_tx_release(struct i2400ms *i2400ms)
{
	struct i2400m *i2400m = &i2400ms->i2400m;
	struct workqueue_struct *tx_workqueue;
	unsigned long flags;

	tx_workqueue = i2400ms->tx_workqueue;

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	i2400ms->tx_workqueue = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (tx_workqueue)
		destroy_workqueue(tx_workqueue);
}
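
/*
 * Expected call sequence, per the ROADMAP above (a sketch only; the
 * probe/remove hooks referred to here are assumed to live in the SDIO
 * glue code, not in this file):
 *
 *   probe
 *     i2400ms_tx_setup()        allocate the TX worker and workqueue
 *   ...
 *   i2400m->bus_tx_kick()       i2400ms_bus_tx_kick(), possibly atomic
 *     i2400ms_tx_submit()       runs later from the workqueue
 *   ...
 *   remove
 *     i2400ms_tx_release()      stop and destroy the workqueue
 */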