drivers/infiniband/core/mad_rmpp.c

/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

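/*
 * Receive-side reassembly state machine: a context is ACTIVE while
 * segments are still arriving, COMPLETE once the LAST segment has been
 * delivered, TIMEOUT if the transfer stalls, and CANCELING while the
 * owning agent is being torn down.
 */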
enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE,
	RMPP_STATE_CANCELING
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	u8 base_version;
};

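/*
 * Reassembly contexts are reference counted.  Dropping the final
 * reference signals "comp", so destroy_rmpp_recv() can sleep until all
 * other users (receive path, delayed work) are gone before freeing the
 * address handle and the structure itself.
 */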
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

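/*
 * Agent teardown: release any receive buffers not yet handed to the
 * user, flag every context CANCELING so recv_cleanup_handler() backs
 * off, cancel and flush the delayed work, then destroy the contexts.
 */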
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		rmpp_recv->state = RMPP_STATE_CANCELING;
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		destroy_rmpp_recv(rmpp_recv);
	}
}

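/*
 * Build an ACK by echoing the received header with the response bit of
 * the method toggled; seg_num reports the highest segment received in
 * order and paylen_newwin advertises the edge of the receive window.
 */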
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

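/*
 * Acknowledge the final ACK of a dual-sided RMPP transfer with a
 * zero-segment ACK carrying newwin = 1, which lets the peer begin the
 * transfer in the reverse direction.
 */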
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}

	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state == RMPP_STATE_CANCELING) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}

	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	rmpp_recv->base_version = mad_hdr->base_version;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
						   struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

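/*
 * The advertised receive window is one eighth of the receive queue
 * depth, with a floor of one segment: e.g. max_active = 128 gives a
 * window of 128 >> 3 = 16 segments.
 */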
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
						 int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

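/*
 * Length of the fully reassembled MAD: one class header plus seg_num
 * payload-sized segments, minus the padding implied by the final
 * segment's paylen_newwin.  OPA-capable ports using the OPA base
 * version have a larger segment size; implausible paylen values are
 * ignored rather than trusted.
 */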
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;
	bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
				    rmpp_recv->agent->qp_info->port_priv->port_num);

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
		data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
		pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	} else {
		data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
		pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > IB_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	}

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

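/*
 * Handle a non-first DATA segment: segments beyond the receive window
 * are dropped, duplicates at or below last_ack are simply re-ACKed,
 * and new segments are spliced into their sorted position.  When the
 * in-order prefix reaches the LAST segment the transfer completes;
 * when it reaches the window edge, the window is extended and an ACK
 * is sent.
 */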
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

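/*
 * Send one segment of an outbound transfer.  paylen_newwin carries the
 * total payload length on the first segment, the final segment's
 * payload length on the last segment, and zero otherwise.  The ACK
 * timeout is capped at 2 seconds pending real packet lifetimes.
 */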
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = (mad_send_wr->send_buf.seg_count *
			  mad_send_wr->send_buf.seg_rmpp_size) -
			  mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

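/*
 * Process an inbound ACK against an outstanding send: validate the
 * acknowledged segment and window, advance last_ack, adopt the new
 * window, then either complete the send (last segment ACKed) or push
 * out the next segment that now fits within the window.  An ACK with
 * seg_num 0 and no matching send updates the dual-sided response
 * window via process_ds_ack().
 */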
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;	/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

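/*
 * Entry point for received MADs: non-RMPP MADs pass straight through,
 * DATA segments feed the reassembly machinery (returning a work
 * completion only once a full MAD is available), and ACK/STOP/ABORT
 * are consumed here.
 */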
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

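/*
 * Choose the initial send window for a new transfer.  A response MAD
 * may reuse the response window (repwin) that the peer advertised via
 * a dual-sided ACK on the matching request; otherwise start with a
 * window of one segment.
 */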
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == ah_attr.dlid) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}