/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

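/*
 * Per-transaction receive context.  Each mad_rmpp_recv tracks the
 * reassembly of one inbound RMPP transfer.  It lives on the agent's
 * rmpp_list and is matched on the key fields kept at the end of the
 * struct: tid, src_qp, slid, mgmt_class, class_version and method.
 */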
struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

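/*
 * Reference counting scheme: the final deref_rmpp_recv() fires the
 * completion that destroy_rmpp_recv() waits on before freeing the AH
 * and the context, so callers must not touch rmpp_recv after dropping
 * their last reference.
 */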
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

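/*
 * Build an RMPP ACK over a copy of the received MAD header.  XORing
 * IB_MGMT_METHOD_RESP turns the request method into its response;
 * seg_num reports how far reassembly has progressed and
 * paylen_newwin advertises the receive window edge.
 */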
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

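/*
 * Dual-sided RMPP: answer the sender's final ACK with an ACK of our
 * own carrying seg_num 0 and a window of 1, which (as we read the DS
 * exchange) lets the transaction turn around for the response
 * direction.
 */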
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

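/*
 * Reject a received MAD with an RMPP ABORT carrying rmpp_status;
 * seg_num and paylen_newwin carry no information for an ABORT and
 * are cleared.
 */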
static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

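/*
 * Allocate a receive context from the first arriving segment.  The
 * window starts at 1 (only segment 1 accepted so far) and the match
 * keys are snapshot from the MAD header and work completion.
 */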
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

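/*
 * Segment helpers: each buffer queued on rmpp_list is a raw MAD;
 * these accessors extract the LAST flag and segment number from its
 * RMPP header and walk the sorted segment list.
 */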
static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

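/*
 * Advertise an ACK window of one eighth of the receive queue depth,
 * but never less than one segment; e.g. max_active = 512 gives a
 * window of 64 segments per ACK.
 */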
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

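/*
 * All segments have arrived: ACK the transfer, stop the reassembly
 * timeout, and fix up mad_len for the consumer.  The context is kept
 * around for a grace period, presumably so that late retransmitted
 * duplicates still match it and are simply re-ACKed.
 */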
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

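/*
 * First segment of a transfer.  If a context with the same match keys
 * already exists, this is a duplicate of segment 1 and is routed
 * through continue_rmpp() instead.  A single-segment transfer (LAST
 * set on segment 1) completes immediately; otherwise the reassembly
 * timer is armed and the window is opened.
 */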
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

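/*
 * Transmit the next outbound segment.  paylen_newwin is overloaded on
 * the send side: it holds the total payload length on the FIRST
 * segment, the residual payload on the LAST segment, and zero in
 * between.
 */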
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
			 mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

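/*
 * Handle an inbound ACK for one of our sends.  An ACK whose new
 * window shrinks below the acknowledged segment aborts with W2S; one
 * beyond the segment count or window aborts with S2B.  An ACK that
 * matches no send and carries seg_num 0 is treated as a dual-sided
 * RMPP ACK.
 */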
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

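/*
 * Entry point from the MAD receive path.  Returns the (possibly
 * reassembled) work completion when a full MAD is ready to hand up,
 * or NULL when the packet was consumed by the protocol.  A caller
 * would do roughly the following (a sketch, not the literal call
 * site; deliver_to_client() stands in for the real upcall):
 *
 *	mad_recv_wc = ib_process_rmpp_recv_wc(agent, mad_recv_wc);
 *	if (mad_recv_wc)
 *		deliver_to_client(agent, mad_recv_wc);
 */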
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == ah_attr.dlid) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

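/*
 * Send-side entry points.  The IB_RMPP_RESULT_* codes steer the core
 * MAD layer: UNHANDLED means RMPP is not active and the MAD is sent
 * normally, INTERNAL marks a control packet (ACK, STOP or ABORT) the
 * core may transmit directly, CONSUMED means this layer has taken
 * over transmission, and PROCESSED means no further action is needed.
 */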
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;
	return IB_RMPP_RESULT_CONSUMED;
}