/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};
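/*
 * Lifetime notes: each mad_rmpp_recv is created with a refcount of 1.
 * deref_rmpp_recv() fires the completion when the last reference is
 * dropped; destroy_rmpp_recv() drops its own reference, waits on the
 * completion, and only then releases the address handle and memory, so
 * no concurrent user can still be touching the structure.
 */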
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}
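/*
 * ACK generation: an RMPP ACK reuses the request's header with the
 * response bit of the method flipped, reports the highest in-order
 * segment received in seg_num, and advertises the current receive
 * window in paylen_newwin.  last_ack is updated under rmpp_recv->lock
 * so it stays consistent with seg_num.
 */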
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}
static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}
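/*
 * Dual-sided RMPP: when the response will also be an RMPP transfer,
 * the requester's final ACK is itself acknowledged.  The ACK built
 * here uses segment number 0 and a window of 1, which, as far as this
 * exchange is concerned, reopens the window so the peer may begin its
 * response.
 */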
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}
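/*
 * alloc_response_msg() stores the address handle it creates in
 * msg->context[0].  The send handler below relies on that marker: if
 * context[0] matches the send buffer's ah, the handle was allocated
 * here and must be destroyed along with the buffer.
 */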
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}
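/* A NACK is delivered as an RMPP ABORT carrying the given status. */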
static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}
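/*
 * Reassembly timeout: if the transfer is still RMPP_STATE_ACTIVE when
 * timeout_work fires, it is abandoned with status T2L (total time too
 * long) and the partially reassembled receive buffers are freed.
 */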
static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}
static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}
static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}
static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}
static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}
static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}
static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}
static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
						   struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}
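/*
 * The advertised receive window is derived from the receive queue
 * depth: one eighth of the active receive entries, but at least one
 * segment.
 */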
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}
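/*
 * Segments may arrive out of order.  find_seg_location() walks the
 * segment list backwards to locate the insertion point for a newly
 * arrived segment, and update_seg_num() then advances cur_seg_buf and
 * seg_num across any segments the new arrival made contiguous.
 */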
static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
						 int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}
static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}
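/*
 * Total MAD length = class header size plus one full RMPP data payload
 * per segment, minus the padding reported in the last segment's
 * paylen_newwin.  An out-of-range pad is treated as zero.
 */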
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}
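/*
 * complete_rmpp() acknowledges the final segment, fills in the total
 * message length, and schedules cleanup_work; keeping the completed
 * context around briefly lets retransmitted segments be re-ACKed
 * instead of starting a bogus new transfer.
 */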
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}
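/*
 * start_rmpp() handles the first segment of a new transfer.  If a
 * matching reassembly context was inserted concurrently, this first
 * MAD is a duplicate and is routed through continue_rmpp() instead.
 */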
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}
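/*
 * On the send side paylen_newwin is overloaded: the first segment
 * carries the total payload length, the last segment carries the
 * length of its own data, and middle segments carry zero.
 */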
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
			 mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}
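/*
 * abort_send() terminates a local RMPP send, completing it toward the
 * client with IB_WC_REM_ABORT_ERR and the RMPP status in vendor_err.
 */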
static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}
static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}
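/*
 * An ACK that matches no outstanding send and carries seg_num 0 is a
 * dual-sided RMPP ACK from the original requester; its window is saved
 * in repwin and later seeds the response's window via init_newwin().
 */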
static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}
static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}
static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}
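/*
 * init_newwin() is the sender half of dual-sided RMPP: when sending a
 * response, the initial window comes from the repwin the requester
 * advertised, matched by TID, management class, and destination LID;
 * otherwise the window starts at one segment.
 */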
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == ah_attr.dlid) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}
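/*
 * On each send completion decide whether the transfer is finished,
 * must stall at the window edge waiting for an ACK, or can post its
 * next segment immediately.
 */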
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;
	return IB_RMPP_RESULT_CONSUMED;
}