drivers/infiniband/core/mad_rmpp.c

/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE,
	RMPP_STATE_CANCELING
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u32 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	u8 base_version;
};

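/*
 * A mad_rmpp_recv is reference counted.  deref_rmpp_recv() drops a
 * reference and signals the completion when the last one goes away;
 * destroy_rmpp_recv() waits on that completion before freeing, so a
 * context is never freed while another thread still holds it.
 */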
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE);
	kfree(rmpp_recv);
}

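/*
 * Agent teardown: mark every in-progress reassembly as CANCELING under
 * the lock, cancel and flush the delayed work so no handler can still
 * run, then unlink and destroy each receive context.
 */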
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		rmpp_recv->state = RMPP_STATE_CANCELING;
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		destroy_rmpp_recv(rmpp_recv);
	}
}

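/*
 * Build an RMPP ACK by copying the received header, toggling the
 * response bit in the method, and advertising the highest in-order
 * segment (seg_num) plus the current receive window (paylen_newwin).
 */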
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

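/*
 * Reply to the ACK that ends a dual-sided RMPP transfer: echo the
 * received ACK with the response bit toggled, seg_num 0 and a window
 * of one segment.
 */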
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}

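/*
 * Send completion handler for locally generated control messages.
 * alloc_response_msg() stores the AH it created in context[0]; when it
 * matches send_buf->ah, the AH belongs to us and is destroyed here.
 */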
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		rdma_destroy_ah(mad_send_wc->send_buf->ah,
				RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}

static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state == RMPP_STATE_CANCELING) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	rmpp_recv->base_version = mad_hdr->base_version;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

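/*
 * A receive context is identified by the tuple
 * <TID, source QP, SLID, mgmt class, class version, method>.
 */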
static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

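/*
 * Receive window: one eighth of the receive queue depth, but always at
 * least one segment.
 */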
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

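/*
 * Total reassembled MAD length: one header plus a full data payload per
 * segment, minus the padding derived from the last segment's
 * paylen_newwin.  OPA and IB MADs use different segment payload sizes.
 */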
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;
	bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
				    rmpp_recv->agent->qp_info->port_priv->port_num);

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
		data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
		pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	} else {
		data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
		pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > IB_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	}

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

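/*
 * All segments have arrived: ACK the final segment and schedule cleanup.
 * The context stays on the list in COMPLETE state so that retransmitted
 * segments are simply re-ACKed (see continue_rmpp()) instead of starting
 * a new transfer.
 */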
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

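/*
 * First segment of a transfer: create and insert a new receive context.
 * If insertion finds an existing context, this is a duplicate first MAD
 * and is handed to continue_rmpp() instead.
 */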
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

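/*
 * Transmit the next outbound segment.  The FIRST segment advertises the
 * total payload length, the LAST segment its residual length; all other
 * segments carry paylen_newwin == 0.
 */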
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = (mad_send_wr->send_buf.seg_count *
			  mad_send_wr->send_buf.seg_rmpp_size) -
			  mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

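/*
 * Handle an ACK for a send in progress: validate seg_num against the
 * advertised window, slide last_ack forward, and either complete the
 * send, rearm its timeout, or push more segments into the new window.
 */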
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

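/*
 * Dispatch a DATA segment: segment 1 must carry the FIRST flag and
 * starts a new transfer; any other segment must not carry it.
 */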
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

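/*
 * When sending a response MAD, look up the completed receive with the
 * same TID/class/version from this requester and seed the send window
 * from the repwin it advertised (dual-sided RMPP); otherwise the window
 * starts at one segment.
 */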
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

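/*
 * Entry point for sends: returns IB_RMPP_RESULT_UNHANDLED for non-RMPP
 * MADs, IB_RMPP_RESULT_INTERNAL for ACK/STOP/ABORT, and
 * IB_RMPP_RESULT_CONSUMED once the first DATA segment has been queued.
 */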
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}