drivers/infiniband/hw/ipath/ipath_uc.c
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
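/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST */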

/**
 * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int ipath_make_uc_req(struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int ret = 0;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
		goto done;

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 1 << 22; /* Set M bit */
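	/*
	 * BTH dword 0 as assembled in this file: opcode in bits 31:24
	 * (qp->s_state << 24 below), solicited event in bit 23, the M
	 * (migration) bit in 22, and the pad count in bits 21:20.
	 */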

	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head)
			goto done;
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = len = wqe->length;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto done;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	ipath_make_ruc_header(to_idev(qp->ibqp.device),
			      qp, ohdr, bth0 | (qp->s_state << 24),
			      qp->s_next_psn++ & IPATH_PSN_MASK);
	ret = 1;

done:
	return ret;
}
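
/*
 * The sender above, in brief: a message that fits in one path MTU goes
 * out as a single *_ONLY packet; anything larger is segmented into
 * *_FIRST, zero or more *_MIDDLE, and a final *_LAST packet, each
 * carrying at most one PMTU of payload.
 */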

/**
 * ipath_uc_rcv - handle an incoming UC packet
 * @dev: the device the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from ipath_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ib_reth *reth;
	int header_in_data;

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the
		 * core driver sets the eager header buffer
		 * size to 56 bytes so the last 4 bytes of
		 * the BTH header (PSN) are in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}
	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;

	wc.imm_data = 0;
	wc.wc_flags = 0;
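
	/*
	 * wc.imm_data and wc.wc_flags are cleared up front because
	 * several receive paths below reach the completion code at
	 * last_imm both with and without immediate data.
	 */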

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
	inv:
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			dev->n_pkt_drops++;
			goto done;
		}
	}
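
	/*
	 * Note that unlike RC, UC has no way to ask the sender for a
	 * retransmit: a PSN mismatch above silently abandons any message
	 * in progress and resynchronizes on the next FIRST or ONLY packet.
	 */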

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}
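
	/*
	 * In short: MIDDLE/LAST opcodes are accepted only while a message
	 * of the same type is in progress; otherwise only a FIRST or ONLY
	 * opcode may start a new message, and anything else restarts
	 * reception via inv above.
	 */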

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	send_first:
		if (qp->r_reuse_sge) {
			qp->r_reuse_sge = 0;
			qp->r_sge = qp->s_rdma_read_sge;
		} else if (!ipath_get_rwqe(qp, 0)) {
			dev->n_pkt_drops++;
			goto done;
		}
		/* Save the WQE so we can reuse it in case of an error. */
		qp->s_rdma_read_sge = qp->r_sge;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4))) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len)) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;
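
	/*
	 * For the LAST/ONLY length checks below, tlen covers the packet
	 * header, the payload, any pad bytes, and the 4-byte ICRC, so
	 * hdrsize + pad + 4 is subtracted before the payload length is
	 * compared with the receive buffer length.
	 */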
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4))) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len)) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		/* XXX Need to free SGEs */
	last_imm:
		ipath_copy_sge(&qp->r_sge, data, tlen);
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;
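
	/*
	 * RDMA writes deposit data directly at the address described by
	 * the RETH (after an rkey check); only the WITH_IMMEDIATE variants
	 * consume a receive WQE and generate a completion entry.
	 */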
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):	/* consume RWQE */
	rdma_first:
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
					   vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok)) {
				dev->n_pkt_drops++;
				goto done;
			}
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			dev->n_pkt_drops++;
			goto done;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			goto rdma_last_imm;
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4))) {
			dev->n_pkt_drops++;
			goto done;
		}
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len)) {
			dev->n_pkt_drops++;
			goto done;
		}
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;
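
	/*
	 * A LAST_WITH_IMMEDIATE still has to consume a receive WQE (or
	 * reuse the saved one) even though the payload lands in the RDMA
	 * target buffer, because the immediate data is reported through
	 * a completion entry.
	 */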
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
	rdma_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4))) {
			dev->n_pkt_drops++;
			goto done;
		}
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
			dev->n_pkt_drops++;
			goto done;
		}
		if (qp->r_reuse_sge)
			qp->r_reuse_sge = 0;
		else if (!ipath_get_rwqe(qp, 1)) {
			dev->n_pkt_drops++;
			goto done;
		}
		wc.byte_len = qp->r_len;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
	rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4))) {
			dev->n_pkt_drops++;
			goto done;
		}
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
			dev->n_pkt_drops++;
			goto done;
		}
		ipath_copy_sge(&qp->r_sge, data, tlen);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		dev->n_pkt_drops++;
		goto done;
	}
	qp->r_psn++;
	qp->r_state = opcode;
done:
	return;
}