// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015-2018 Intel Corporation.
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC(name) SEND_CTXT_##name
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
	while (1) {
		udelay(1);
		sendctrl = read_csr(dd, SEND_CTRL);
		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
			break;
	}
}
/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
	u64 reg, mask;
	unsigned long flags;
	int write = 1;	/* write sendctrl back */
	int flush = 0;	/* re-read sendctrl to make sure it is flushed */
	int i;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	reg = read_csr(dd, SEND_CTRL);
	switch (op) {
	case PSC_GLOBAL_ENABLE:
		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
		fallthrough;
	case PSC_DATA_VL_ENABLE:
		mask = 0;
		for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
			if (!dd->vld[i].mtu)
				mask |= BIT_ULL(i);
		/* Disallow sending on VLs not enabled */
		mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
		break;
	case PSC_GLOBAL_DISABLE:
		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_ENABLE:
		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_DISABLE:
		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_CM_RESET:
		__cm_reset(dd, reg);
		write = 0; /* CSR already written (and flushed) */
		break;
	case PSC_DATA_VL_DISABLE:
		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
		flush = 1;
		break;
	default:
		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
		break;
	}

	if (write) {
		write_csr(dd, SEND_CTRL, reg);
		if (flush)
			(void)read_csr(dd, SEND_CTRL); /* flush write */
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}
/* number of send context memory pools */
#define NUM_SC_POOLS 2

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU -2
#define SCC_PER_KRCVQ -3

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS 32
#define SCS_VL15_CREDITS 102	/* 3 pkts of 2048B data + 128B header */

#define PIO_THRESHOLD_CEILING 4096

#define PIO_WAIT_BATCH_SIZE 5
/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
	[SC_KERNEL] = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_VL },	/* one per NUMA */
	[SC_ACK]    = { .size  = SCS_ACK_CREDITS,
			.count = SCC_PER_KRCVQ },
	[SC_USER]   = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_CPU },	/* one per CPU */
	[SC_VL15]   = { .size  = SCS_VL15_CREDITS,
			.count = 1 },
};
/* send context memory pool configuration */
struct mem_pool_config {
	int centipercent;	/* % of memory, in 100ths of 1% */
	int absolute_blocks;	/* absolute block count */
};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
	/* centi%, abs blocks */
	{ 10000, -1 },		/* pool 0 */
	{     0, -1 },		/* pool 1 */
};
/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
	int centipercent;	/*
				 * 100th of 1% of memory to use, -1 if blocks
				 * already set
				 */
	int count;		/* count of contexts in the pool */
	int blocks;		/* block size of the pool */
	int size;		/* context size, in blocks */
};
/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively.  Map them as:
 *	-1 => 0
 *	-2 => 1
 *	etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
	if (wc >= 0)
		return -1;	/* non-wildcard */
	return -wc - 1;
}
static const char *sc_type_names[SC_MAX] = {
	"kernel",
	"ack",
	"user",
	"vl15"
};

static const char *sc_type_name(int index)
{
	if (index < 0 || index >= SC_MAX)
		return "unknown";
	return sc_type_names[index];
}
/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
	struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
	int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
	int total_contexts = 0;
	int fixed_blocks;
	int pool_blocks;
	int used_blocks;
	int cp_total;		/* centipercent total */
	int ab_total;		/* absolute block total */
	int extra;
	int i;

	/*
	 * When SDMA is enabled, kernel context pio packet size is capped by
	 * "piothreshold".  Reduce pio buffer allocation for kernel context by
	 * setting it to a fixed size.  The allocation allows 3-deep buffering
	 * of the largest pio packets plus up to 128 bytes header, sufficient
	 * to maintain verbs performance.
	 *
	 * When SDMA is disabled, keep the default pooling allocation.
	 */
	if (HFI1_CAP_IS_KSET(SDMA)) {
		u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
				   piothreshold : PIO_THRESHOLD_CEILING;

		sc_config_sizes[SC_KERNEL].size =
			3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
	}
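	/*
	 * Worked example (illustrative, not from the original source):
	 * assuming PIO_BLOCK_SIZE is 64 bytes, if piothreshold were 2048
	 * the kernel context size would become
	 * 3 * (2048 + 128) / 64 = 102 blocks -- the same 3-deep buffering
	 * that SCS_VL15_CREDITS (102) provides for VL15 above.
	 */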
	/*
	 * Step 0:
	 *	- copy the centipercents/absolute sizes from the pool config
	 *	- sanity check these values
	 *	- add up centipercents, then later check for full value
	 *	- add up absolute blocks, then later check for over-commit
	 */
	cp_total = 0;
	ab_total = 0;
	for (i = 0; i < NUM_SC_POOLS; i++) {
		int cp = sc_mem_pool_config[i].centipercent;
		int ab = sc_mem_pool_config[i].absolute_blocks;

		/*
		 * A negative value is "unused" or "invalid".  Both *can*
		 * be valid, but centipercent wins, so check that first
		 */
		if (cp >= 0) {			/* centipercent valid */
			cp_total += cp;
		} else if (ab >= 0) {		/* absolute blocks valid */
			ab_total += ab;
		} else {			/* neither valid */
			dd_dev_err(
				dd,
				"Send context memory pool %d: both the block count and centipercent are invalid\n",
				i);
			return -EINVAL;
		}

		mem_pool_info[i].centipercent = cp;
		mem_pool_info[i].blocks = ab;
	}

	/* do not use both % and absolute blocks for different pools */
	if (cp_total != 0 && ab_total != 0) {
		dd_dev_err(
			dd,
			"All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
		return -EINVAL;
	}

	/* if any percentages are present, they must add up to 100% x 100 */
	if (cp_total != 0 && cp_total != 10000) {
		dd_dev_err(
			dd,
			"Send context memory pool centipercent is %d, expecting 10000\n",
			cp_total);
		return -EINVAL;
	}

	/* the absolute pool total cannot be more than the mem total */
	if (ab_total > total_blocks) {
		dd_dev_err(
			dd,
			"Send context memory pool absolute block count %d is larger than the memory size %d\n",
			ab_total, total_blocks);
		return -EINVAL;
	}
	/*
	 * Step 2:
	 *	- copy from the context size config
	 *	- replace context type wildcard counts with real values
	 *	- add up non-memory pool block sizes
	 *	- add up memory pool user counts
	 */
	fixed_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		int count = sc_config_sizes[i].count;
		int size = sc_config_sizes[i].size;
		int pool;

		/*
		 * Sanity check count: Either a positive value or
		 * one of the expected wildcards is valid.  The positive
		 * value is checked later when we compare against total
		 * memory available.
		 */
		if (i == SC_ACK) {
			count = dd->n_krcv_queues;
		} else if (i == SC_KERNEL) {
			count = INIT_SC_PER_VL * num_vls;
		} else if (count == SCC_PER_CPU) {
			count = dd->num_rcv_contexts - dd->n_krcv_queues;
		} else if (count < 0) {
			dd_dev_err(
				dd,
				"%s send context invalid count wildcard %d\n",
				sc_type_name(i), count);
			return -EINVAL;
		}
		if (total_contexts + count > chip_send_contexts(dd))
			count = chip_send_contexts(dd) - total_contexts;

		total_contexts += count;

		/*
		 * Sanity check pool: The conversion will return a pool
		 * number or -1 if a fixed (non-negative) value.  The fixed
		 * value is checked later when we compare against
		 * total memory available.
		 */
		pool = wildcard_to_pool(size);
		if (pool == -1) {			/* non-wildcard */
			fixed_blocks += size * count;
		} else if (pool < NUM_SC_POOLS) {	/* valid wildcard */
			mem_pool_info[pool].count += count;
		} else {				/* invalid wildcard */
			dd_dev_err(
				dd,
				"%s send context invalid pool wildcard %d\n",
				sc_type_name(i), size);
			return -EINVAL;
		}

		dd->sc_sizes[i].count = count;
		dd->sc_sizes[i].size = size;
	}
	if (fixed_blocks > total_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed block count, %u, larger than total block count %u\n",
			fixed_blocks, total_blocks);
		return -EINVAL;
	}
	/* step 3: calculate the blocks in the pools, and pool context sizes */
	pool_blocks = total_blocks - fixed_blocks;
	if (ab_total > pool_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed pool sizes, %u, larger than pool block count %u\n",
			ab_total, pool_blocks);
		return -EINVAL;
	}
	/* subtract off the fixed pool blocks */
	pool_blocks -= ab_total;

	for (i = 0; i < NUM_SC_POOLS; i++) {
		struct mem_pool_info *pi = &mem_pool_info[i];

		/* % beats absolute blocks */
		if (pi->centipercent >= 0)
			pi->blocks = (pool_blocks * pi->centipercent) / 10000;

		if (pi->blocks == 0 && pi->count != 0) {
			dd_dev_err(
				dd,
				"Send context memory pool %d has %u contexts, but no blocks\n",
				i, pi->count);
			return -EINVAL;
		}
		if (pi->count == 0) {
			/* warn about wasted blocks */
			if (pi->blocks != 0)
				dd_dev_err(
					dd,
					"Send context memory pool %d has %u blocks, but zero contexts\n",
					i, pi->blocks);
			pi->size = 0;
		} else {
			pi->size = pi->blocks / pi->count;
		}
	}
	/* step 4: fill in the context type sizes from the pool sizes */
	used_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		if (dd->sc_sizes[i].size < 0) {
			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
			dd->sc_sizes[i].size = mem_pool_info[pool].size;
		}
		/* make sure we are not larger than what is allowed by the HW */
#define PIO_MAX_BLOCKS 1024
		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

		/* calculate our total usage */
		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
	}
	extra = total_blocks - used_blocks;
	if (extra != 0)
		dd_dev_info(dd, "unused send context blocks: %d\n", extra);

	return total_contexts;
}
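/*
 * Worked example of the default sizing (illustrative numbers, not from
 * the original source): with 64 KB of PIO memory in 64 B blocks,
 * total_blocks = 1024 - 1 = 1023.  The ack and vl15 contexts use fixed
 * sizes, so their blocks are counted as fixed_blocks first; pool 0 then
 * receives 100% (centipercent 10000) of the remainder and divides it
 * evenly among the kernel and user contexts that named SCS_POOL_0 as
 * their size wildcard.
 */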
int init_send_contexts(struct hfi1_devdata *dd)
{
	u16 base;
	int ret, i, j, context;

	ret = init_credit_return(dd);
	if (ret)
		return ret;

	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
				     GFP_KERNEL);
	dd->send_contexts = kcalloc(dd->num_send_contexts,
				    sizeof(struct send_context_info),
				    GFP_KERNEL);
	if (!dd->send_contexts || !dd->hw_to_sw) {
		kfree(dd->hw_to_sw);
		kfree(dd->send_contexts);
		free_credit_return(dd);
		return -ENOMEM;
	}

	/* hardware context map starts with invalid send context indices */
	for (i = 0; i < TXE_NUM_CONTEXTS; i++)
		dd->hw_to_sw[i] = INVALID_SCI;

	/*
	 * All send contexts have their credit sizes.  Allocate credits
	 * for each context one after another from the global space.
	 */
	context = 0;
	base = 1;
	for (i = 0; i < SC_MAX; i++) {
		struct sc_config_sizes *scs = &dd->sc_sizes[i];

		for (j = 0; j < scs->count; j++) {
			struct send_context_info *sci =
						&dd->send_contexts[context];
			sci->type = i;
			sci->base = base;
			sci->credits = scs->size;

			context++;
			base += scs->size;
		}
	}

	return 0;
}
/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
		       u32 *hw_context)
{
	struct send_context_info *sci;
	u32 index;
	u32 context;

	for (index = 0, sci = &dd->send_contexts[0];
	     index < dd->num_send_contexts; index++, sci++) {
		if (sci->type == type && sci->allocated == 0) {
			sci->allocated = 1;
			/* use a 1:1 mapping, but make them non-equal */
			context = chip_send_contexts(dd) - index - 1;
			dd->hw_to_sw[context] = index;
			*sw_index = index;
			*hw_context = context;
			return 0; /* success */
		}
	}
	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
	return -ENOSPC;
}
/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
	struct send_context_info *sci;

	sci = &dd->send_contexts[sw_index];
	if (!sci->allocated) {
		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
			   __func__, sw_index, hw_context);
	}
	sci->allocated = 0;
	dd->hw_to_sw[hw_context] = INVALID_SCI;
}
/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{
	return (context >> group) << group;
}

/* return the size of a group */
static inline u32 group_size(u32 group)
{
	return 1 << group;
}
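/*
 * Illustrative example (not from the original source): with group == 2
 * a group spans group_size(2) == 4 contexts, and hw contexts 8..11 all
 * share base context group_context(11, 2) == (11 >> 2) << 2 == 8.  With
 * the driver's current group == 0, every context is its own group of
 * size 1.
 */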
/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
	u32 gc = group_context(sc->hw_context, sc->group);
	u32 index = sc->hw_context & 0x7;

	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
	*dma = (unsigned long)
	       &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}
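/*
 * Illustrative example (not from the original source): for hw_context 11
 * in a group of one (group == 0), gc == 11 and index == 11 & 0x7 == 3,
 * so this context polls cr_base[node].va[11].cr[3] for its credit
 * return, and the CSR is programmed with the bus address of
 * credit_return slot 11.
 */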
/*
 * Work queue function triggered in error interrupt routine for
 * halted contexts.
 */
static void sc_halted(struct work_struct *work)
{
	struct send_context *sc;

	sc = container_of(work, struct send_context, halt_work);
	sc_restart(sc);
}
/*
 * Calculate PIO block threshold for this send context using the given MTU.
 * Trigger a return when one MTU plus optional header of credits remain.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
	u32 release_credits;
	u32 threshold;

	/* add in the header size, then divide by the PIO block size */
	mtu += hdrqentsize << 2;
	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

	/* check against this context's credits */
	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;

	return threshold;
}
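/*
 * Worked example (illustrative, assuming PIO_BLOCK_SIZE is 64 bytes):
 * mtu = 8192 and hdrqentsize = 32 DWORDs gives 8192 + 128 = 8320 bytes,
 * i.e. DIV_ROUND_UP(8320, 64) = 130 release credits.  A context with
 * 160 credits then gets a threshold of 160 - 130 = 30; a context with
 * only 128 credits falls back to a threshold of 1.
 */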
/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
	return (sc->credits * percent) / 100;
}
/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
	unsigned long flags;
	u32 old_threshold;
	int force_return = 0;

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

	old_threshold = (sc->credit_ctrl >>
				SC(CREDIT_CTRL_THRESHOLD_SHIFT))
			 & SC(CREDIT_CTRL_THRESHOLD_MASK);

	if (new_threshold != old_threshold) {
		sc->credit_ctrl =
			(sc->credit_ctrl
				& ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
			| ((new_threshold
				& SC(CREDIT_CTRL_THRESHOLD_MASK))
			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);

		/* force a credit return on change to avoid a possible stall */
		force_return = 1;
	}

	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

	if (force_return)
		sc_return_credits(sc);
}
/*
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u32 hw_context = sc->hw_context;
	int type = sc->type;

	write_kctxt_csr(dd, hw_context,
			SC(CHECK_ENABLE),
			hfi1_pkt_default_send_ctxt_mask(dd, type));
}
static u32 get_buffers_allocated(struct send_context *sc)
{
	int cpu;
	u32 ret = 0;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
	return ret;
}
static void reset_buffers_allocated(struct send_context *sc)
{
	int cpu;

	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}
/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa)
{
	struct send_context_info *sci;
	struct send_context *sc = NULL;
	dma_addr_t dma;
	unsigned long flags;
	u64 reg;
	u32 thresh;
	u32 sw_index;
	u32 hw_context;
	int ret;
	u8 opval, opmask;

	/* do not allocate while frozen */
	if (dd->flags & HFI1_FROZEN)
		return NULL;

	sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
	if (!sc)
		return NULL;

	sc->buffers_allocated = alloc_percpu(u32);
	if (!sc->buffers_allocated) {
		kfree(sc);
		dd_dev_err(dd,
			   "Cannot allocate buffers_allocated per cpu counters\n"
			  );
		return NULL;
	}
	spin_lock_irqsave(&dd->sc_lock, flags);
	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
	if (ret) {
		spin_unlock_irqrestore(&dd->sc_lock, flags);
		free_percpu(sc->buffers_allocated);
		kfree(sc);
		return NULL;
	}

	sci = &dd->send_contexts[sw_index];
	sci->sc = sc;

	sc->dd = dd;
	sc->node = numa;
	sc->type = type;
	spin_lock_init(&sc->alloc_lock);
	spin_lock_init(&sc->release_lock);
	spin_lock_init(&sc->credit_ctrl_lock);
	seqlock_init(&sc->waitlock);
	INIT_LIST_HEAD(&sc->piowait);
	INIT_WORK(&sc->halt_work, sc_halted);
	init_waitqueue_head(&sc->halt_wait);

	/* grouping is always single context for now */
	sc->group = 0;

	sc->sw_index = sw_index;
	sc->hw_context = hw_context;
	cr_group_addresses(sc, &dma);
	sc->credits = sci->credits;
	sc->size = sc->credits * PIO_BLOCK_SIZE;
/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
					<< PIO_ADDR_CONTEXT_SHIFT);

	/* set base and credits */
	reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
					<< SC(CTRL_CTXT_DEPTH_SHIFT))
		| ((sci->base & SC(CTRL_CTXT_BASE_MASK))
					<< SC(CTRL_CTXT_BASE_SHIFT));
	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

	set_pio_integrity(sc);

	/* unmask all errors */
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

	/* set the default partition key */
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
			(SC(CHECK_PARTITION_KEY_VALUE_MASK) &
			 DEFAULT_PKEY) <<
			SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
	/* per context type checks */
	if (type == SC_USER) {
		opval = USER_OPCODE_CHECK_VAL;
		opmask = USER_OPCODE_CHECK_MASK;
	} else {
		opval = OPCODE_CHECK_VAL_DISABLED;
		opmask = OPCODE_CHECK_MASK_DISABLED;
	}

	/* set the send context check opcode mask and value */
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
	/* set up credit return */
	reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

	/*
	 * Calculate the initial credit return threshold.
	 *
	 * For Ack contexts, set a threshold for half the credits.
	 * For User contexts use the given percentage.  This has been
	 * sanitized on driver start-up.
	 * For Kernel contexts, use the default MTU plus a header
	 * or half the credits, whichever is smaller.  This should
	 * work for both the 3-deep buffering allocation and the
	 * pooling allocation.
	 */
	if (type == SC_ACK) {
		thresh = sc_percent_to_threshold(sc, 50);
	} else if (type == SC_USER) {
		thresh = sc_percent_to_threshold(sc,
						 user_credit_return_threshold);
	} else { /* kernel */
		thresh = min(sc_percent_to_threshold(sc, 50),
			     sc_mtu_to_threshold(sc, hfi1_max_mtu,
						 hdrqentsize));
	}
	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
	/* add in early return */
	if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
	else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

	/* set up write-through credit_ctrl */
	sc->credit_ctrl = reg;
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
	/* User send contexts should not allow sending on VL15 */
	if (type == SC_USER) {
		reg = 1ULL << 15;
		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
	}

	spin_unlock_irqrestore(&dd->sc_lock, flags);
	/*
	 * Allocate shadow ring to track outstanding PIO buffers _after_
	 * unlocking.  We don't know the size until the lock is held and
	 * we can't allocate while the lock is held.  No one is using
	 * the context yet, so allocate it now.
	 *
	 * User contexts do not get a shadow ring.
	 */
	if (type != SC_USER) {
		/*
		 * Size the shadow ring 1 larger than the number of credits
		 * so head == tail can mean empty.
		 */
		sc->sr_size = sci->credits + 1;
		sc->sr = kcalloc_node(sc->sr_size,
				      sizeof(union pio_shadow_ring),
				      GFP_KERNEL, numa);
		if (!sc->sr) {
			sc_free(sc);
			return NULL;
		}
	}

	hfi1_cdbg(PIO,
		  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u",
		  sw_index, hw_context, sc_type_name(type), sc->group,
		  sc->credits, sc->credit_ctrl, thresh);

	return sc;
}
/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{
	struct hfi1_devdata *dd;
	unsigned long flags;
	u32 sw_index;
	u32 hw_context;

	if (!sc)
		return;

	sc->flags |= SCF_IN_FREE;	/* ensure no restarts */
	dd = sc->dd;
	if (!list_empty(&sc->piowait))
		dd_dev_err(dd, "piowait list not empty!\n");
	sw_index = sc->sw_index;
	hw_context = sc->hw_context;
	sc_disable(sc);	/* make sure the HW is disabled */
	flush_work(&sc->halt_work);

	spin_lock_irqsave(&dd->sc_lock, flags);
	dd->send_contexts[sw_index].sc = NULL;

	/* clear/disable all registers set in sc_alloc */
	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

	/* release the index and context for re-use */
	sc_hw_free(dd, sw_index, hw_context);
	spin_unlock_irqrestore(&dd->sc_lock, flags);

	kfree(sc->sr);
	free_percpu(sc->buffers_allocated);
	kfree(sc);
}
/* disable the context */
void sc_disable(struct send_context *sc)
{
	u64 reg;
	struct pio_buf *pbuf;
	LIST_HEAD(wake_list);

	if (!sc)
		return;

	/* do all steps, even if already disabled */
	spin_lock_irq(&sc->alloc_lock);
	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
	sc->flags &= ~SCF_ENABLED;
	sc_wait_for_packet_egress(sc, 1);
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);

	/*
	 * Flush any waiters.  Once the context is disabled,
	 * credit return interrupts are stopped (although there
	 * could be one in-process when the context is disabled).
	 * Wait one microsecond for any lingering interrupts, then
	 * proceed with the flush.
	 */
	udelay(1);
	spin_lock(&sc->release_lock);
	if (sc->sr) {	/* this context has a shadow ring */
		while (sc->sr_tail != sc->sr_head) {
			pbuf = &sc->sr[sc->sr_tail].pbuf;
			if (pbuf->cb)
				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
			sc->sr_tail++;
			if (sc->sr_tail >= sc->sr_size)
				sc->sr_tail = 0;
		}
	}
	spin_unlock(&sc->release_lock);

	write_seqlock(&sc->waitlock);
	list_splice_init(&sc->piowait, &wake_list);
	write_sequnlock(&sc->waitlock);
	while (!list_empty(&wake_list)) {
		struct iowait *wait;
		struct rvt_qp *qp;
		struct hfi1_qp_priv *priv;

		wait = list_first_entry(&wake_list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
	}

	spin_unlock_irq(&sc->alloc_lock);
}
/* return SendEgressCtxtStatus.PacketOccupancy */
static u64 packet_occupancy(u64 reg)
{
	return (reg &
		SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
		>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
}
/* is egress halted on the context? */
static bool egress_halted(u64 reg)
{
	return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
}
/* is the send context halted? */
static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
{
	return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
		  SC(STATUS_CTXT_HALTED_SMASK));
}
/**
 * sc_wait_for_packet_egress - wait for packet
 * @sc: valid send context
 * @pause: wait for credit return
 *
 * Wait for packet egress, optionally pause for credit return
 *
 * Egress halt and Context halt are not necessarily the same thing, so
 * check for both.
 *
 * NOTE: The context halt bit may not be set immediately.  Because of this,
 * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
 * context bit to determine if the context is halted.
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg = 0;
	u64 reg_prev;
	u32 loop = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, sc->hw_context * 8 +
			       SEND_EGRESS_CTXT_STATUS);
		/* done if any halt bits, SW or HW are set */
		if (sc->flags & SCF_HALTED ||
		    is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
			break;
		reg = packet_occupancy(reg);
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			loop = 0;
		if (loop > 50000) {
			/* timed out - bounce the link */
			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sc->sw_index,
				   sc->hw_context, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		loop++;
		udelay(1);
	}

	if (pause)
		/* Add additional delay to ensure chip returns all credits */
		pause_for_credit_return(dd);
}
void sc_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		struct send_context *sc = dd->send_contexts[i].sc;

		if (!sc)
			continue;
		sc_wait_for_packet_egress(sc, 0);
	}
}
/*
 * Restart a context after it has been halted due to error.
 *
 * If the first step fails - wait for the halt to be asserted, return early.
 * Otherwise complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg;
	u32 loop;
	int count;

	/* bounce off if not halted, or being free'd */
	if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
		return -EINVAL;

	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
		    sc->hw_context);

	/*
	 * Step 1: Wait for the context to actually halt.
	 *
	 * The error interrupt is asynchronous to actually setting halt
	 * on the context.
	 */
	loop = 0;
	while (1) {
		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
		if (reg & SC(STATUS_CTXT_HALTED_SMASK))
			break;
		if (loop > 100) {
			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
				   __func__, sc->sw_index, sc->hw_context);
			return -ETIME;
		}
		loop++;
		udelay(1);
	}

	/*
	 * Step 2: Ensure no users are still trying to write to PIO.
	 *
	 * For kernel contexts, we have already turned off buffer allocation.
	 * Now wait for the buffer count to go to zero.
	 *
	 * For user contexts, the user handling code has cut off write access
	 * to the context's PIO pages before calling this routine and will
	 * restore write access after this routine returns.
	 */
	if (sc->type != SC_USER) {
		/* kernel context */
		loop = 0;
		while (1) {
			count = get_buffers_allocated(sc);
			if (count == 0)
				break;
			if (loop > 100) {
				dd_dev_err(dd,
					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
					   __func__, sc->sw_index,
					   sc->hw_context, count);
				break;
			}
			loop++;
			udelay(1);
		}
	}

	/*
	 * Step 3: Wait for all packets to egress.
	 * This is done while disabling the send context
	 *
	 * Step 4: Disable the context
	 *
	 * This is a superset of the halt.  After the disable, the
	 * errors can be cleared.
	 */
	sc_disable(sc);

	/*
	 * Step 5: Enable the context
	 *
	 * This enable will clear the halted flag and per-send context
	 * error flags.
	 */
	return sc_enable(sc);
}
/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		/*
		 * Don't disable unallocated, unfrozen, or user send contexts.
		 * User send contexts will be disabled when the process
		 * calls into the driver to reset its context.
		 */
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		/* only need to disable, the context is already stopped */
		sc_disable(sc);
	}
}
/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;
		if (sc->flags & SCF_LINK_DOWN)
			continue;

		sc_enable(sc);	/* will clear the sc frozen flag */
	}
}
/**
 * pio_kernel_linkup() - Re-enable send contexts after linkup event
 * @dd: valid device data
 *
 * When the link goes down, the freeze path is taken.  However, a link down
 * event is different from a freeze because if the send context is re-enabled
 * whoever is sending data will start sending data again, which will hang
 * any QP that is sending data.
 *
 * The freeze path now looks at the type of event that occurs and takes this
 * path for link down event.
 */
void pio_kernel_linkup(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
			continue;

		sc_enable(sc);	/* will clear the sc link down flag */
	}
}
/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *	-ETIMEDOUT - if we wait too long
 *	-EIO	   - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
	u64 reg;
	int max, count = 0;

	/* max is the longest possible HW init time / delay */
	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
	while (1) {
		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
		if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
			break;
		if (count >= max)
			return -ETIMEDOUT;
		udelay(5);
		count++;
	}

	return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}
/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{
	int ret;

	/* make sure the init engine is not busy */
	ret = pio_init_wait_progress(dd);
	/* ignore any timeout */
	if (ret == -EIO) {
		/* clear the error */
		write_csr(dd, SEND_PIO_ERR_CLEAR,
			  SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
	}

	/* reset init all */
	write_csr(dd, SEND_PIO_INIT_CTXT,
		  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
	udelay(2);
	ret = pio_init_wait_progress(dd);
	if (ret < 0) {
		dd_dev_err(dd,
			   "PIO send context init %s while initializing all PIO blocks\n",
			   ret == -ETIMEDOUT ? "is stuck" : "had an error");
	}
}
/* enable the context */
int sc_enable(struct send_context *sc)
{
	u64 sc_ctrl, reg, pio;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!sc);
	dd = sc->dd;

	/*
	 * Obtain the allocator lock to guard against any allocation
	 * attempts (which should not happen prior to context being
	 * enabled).  On the release/disable side we don't need to
	 * worry about locking since the releaser will not do anything
	 * if the context accounting values have not changed.
	 */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
		goto unlock; /* already enabled */
	/* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */

	*sc->hw_free = 0;
	sc->free = 0;
	sc->alloc_free = 0;
	sc->fill = 0;
	sc->fill_wrap = 0;
	sc->sr_head = 0;
	sc->sr_tail = 0;
	sc->flags = 0;
	/* the alloc lock ensures no fast path allocation */
	reset_buffers_allocated(sc);

	/*
	 * Clear all per-context errors.  Some of these will be set when
	 * we are re-enabling after a context halt.  Now that the context
	 * is disabled, the halt will not clear until after the PIO init
	 * engine runs below.
	 */
	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
	if (reg)
		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
	/*
	 * The HW PIO initialization engine can handle only one init
	 * request at a time.  Serialize access to each device's engine.
	 */
	spin_lock(&dd->sc_init_lock);
	/*
	 * Since access to this code block is serialized and
	 * each access waits for the initialization to complete
	 * before releasing the lock, the PIO initialization engine
	 * should not be in use, so we don't have to wait for the
	 * InProgress bit to go down.
	 */
	pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
	       SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
		SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
	write_csr(dd, SEND_PIO_INIT_CTXT, pio);
	/*
	 * Wait until the engine is done.  Give the chip the required time
	 * so, hopefully, we read the register just once.
	 */
	udelay(2);
	ret = pio_init_wait_progress(dd);
	spin_unlock(&dd->sc_init_lock);
	if (ret) {
		dd_dev_err(dd,
			   "sctxt%u(%u): Context not enabled due to init failure %d\n",
			   sc->sw_index, sc->hw_context, ret);
		goto unlock;
	}
	/*
	 * All is well.  Enable the context.
	 */
	sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
	/*
	 * Read SendCtxtCtrl to force the write out and prevent a timing
	 * hazard where a PIO write may reach the context before the enable.
	 */
	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	sc->flags |= SCF_ENABLED;

unlock:
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	return ret;
}
/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{
	if (!sc)
		return;

	/* a 0->1 transition schedules a credit return */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
	/*
	 * Ensure that the write is flushed and the credit return is
	 * scheduled.  We care more about the 0 -> 1 transition.
	 */
	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
	/* set back to 0 for next time */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}
/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{
	if (!sc)
		return;

	sc_wait_for_packet_egress(sc, 1);
}
/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{
	if (!sc)
		return;

	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
		    __func__, sc->sw_index, sc->hw_context);
}
/*
 * Start the software reaction to a context halt or SPC freeze:
 *	- mark the context as halted or frozen
 *	- stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
 */
void sc_stop(struct send_context *sc, int flag)
{
	unsigned long flags;

	/* stop buffer allocations */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	/* mark the context */
	sc->flags |= flag;
	sc->flags &= ~SCF_ENABLED;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);
	wake_up(&sc->halt_wait);
}
#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
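/*
 * Illustrative example (not from the original source): with
 * PIO_BLOCK_SIZE of 64 bytes, BLOCK_DWORDS is 16, so an 18-DWORD packet
 * (72 bytes including the PBC) occupies dwords_to_blocks(18) == 2
 * blocks.
 */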
/*
 * The send context buffer "allocator".
 *
 * @sc: the PIO send context we are allocating from
 * @dw_len: length of whole packet - including PBC - in dwords
 * @cb: optional callback to call when the buffer is finished sending
 * @arg: argument for cb
 *
 * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
 * when link is down.
 */
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
				pio_release_cb cb, void *arg)
{
	struct pio_buf *pbuf = NULL;
	unsigned long flags;
	unsigned long avail;
	unsigned long blocks = dwords_to_blocks(dw_len);
	u32 fill_wrap;
	int trycount = 0;
	u32 head, next;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	if (!(sc->flags & SCF_ENABLED)) {
		spin_unlock_irqrestore(&sc->alloc_lock, flags);
		return ERR_PTR(-ECOMM);
	}
retry:
	avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
	if (blocks > avail) {
		/* not enough room */
		if (unlikely(trycount)) { /* already tried to get more room */
			spin_unlock_irqrestore(&sc->alloc_lock, flags);
			goto done;
		}
		/* copy from receiver cache line and recalculate */
		sc->alloc_free = READ_ONCE(sc->free);
		avail =
			(unsigned long)sc->credits -
			(sc->fill - sc->alloc_free);
		if (blocks > avail) {
			/* still no room, actively update */
			sc_release_update(sc);
			sc->alloc_free = READ_ONCE(sc->free);
			trycount++;
			goto retry;
		}
	}
1451 this_cpu_inc(*sc
->buffers_allocated
);
1453 /* read this once */
1456 /* "allocate" the buffer */
1458 fill_wrap
= sc
->fill_wrap
;
1459 sc
->fill_wrap
+= blocks
;
1460 if (sc
->fill_wrap
>= sc
->credits
)
1461 sc
->fill_wrap
= sc
->fill_wrap
- sc
->credits
;
1464 * Fill the parts that the releaser looks at before moving the head.
1465 * The only necessary piece is the sent_at field. The credits
1466 * we have just allocated cannot have been returned yet, so the
1467 * cb and arg will not be looked at for a "while". Put them
1468 * on this side of the memory barrier anyway.
1470 pbuf
= &sc
->sr
[head
].pbuf
;
1471 pbuf
->sent_at
= sc
->fill
;
1474 pbuf
->sc
= sc
; /* could be filled in at sc->sr init time */
1475 /* make sure this is in memory before updating the head */
1477 /* calculate next head index, do not store */
1479 if (next
>= sc
->sr_size
)
1482 * update the head - must be last! - the releaser can look at fields
1483 * in pbuf once we move the head
1487 spin_unlock_irqrestore(&sc
->alloc_lock
, flags
);
1489 /* finish filling in the buffer outside the lock */
1490 pbuf
->start
= sc
->base_addr
+ fill_wrap
* PIO_BLOCK_SIZE
;
1491 pbuf
->end
= sc
->base_addr
+ sc
->size
;
1492 pbuf
->qw_written
= 0;
1493 pbuf
->carry_bytes
= 0;
1494 pbuf
->carry
.val64
= 0;
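/*
 * Credit accounting example (illustrative numbers, not from the
 * original source): a context with credits == 64 that has filled 100
 * blocks total (sc->fill) and last saw 40 returned (sc->alloc_free)
 * has 100 - 40 = 60 blocks in flight, leaving avail == 4.  A 5-block
 * request then refreshes alloc_free from sc->free and, if still short,
 * calls sc_release_update() once before giving up.
 */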
/*
 * There are at least two entities that can turn on credit return
 * interrupts and they can overlap.  Avoid problems by implementing
 * a count scheme that is enforced by a lock.  The lock is needed because
 * the count and CSR write must be paired.
 */
/*
 * Start credit return interrupts.  This is managed by a count.  If already
 * on, just increment the count.
 */
void sc_add_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	/* lock must surround both the count change and the CSR update */
	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	sc->credit_intr_count++;
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}
/*
 * Stop credit return interrupts.  This is managed by a count.  Decrement the
 * count, if the last user, then turn the credit interrupts off.
 */
void sc_del_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	WARN_ON(sc->credit_intr_count == 0);

	/* lock must surround both the count change and the CSR update */
	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	sc->credit_intr_count--;
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}
/*
 * The caller must be careful when calling this.  All needint calls
 * must be paired with !needint.
 */
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
{
	if (needint)
		sc_add_credit_return_intr(sc);
	else
		sc_del_credit_return_intr(sc);
	trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
	if (needint)
		sc_return_credits(sc);
}
/**
 * sc_piobufavail - callback when a PIO buffer is available
 * @sc: the send context
 *
 * This is called from the interrupt handler when a PIO buffer is
 * available after hfi1_verbs_send() returned an error that no buffers were
 * available.  Disable the interrupt if there are no more QPs waiting.
 */
static void sc_piobufavail(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	struct list_head *list;
	struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	uint i, n = 0, top_idx = 0;

	if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
	    dd->send_contexts[sc->sw_index].type != SC_VL15)
		return;
	list = &sc->piowait;
	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	write_seqlock_irqsave(&sc->waitlock, flags);
	while (!list_empty(list)) {
		struct iowait *wait;

		if (n == ARRAY_SIZE(qps))
			break;
		wait = list_first_entry(list, struct iowait, list);
		iowait_get_priority(wait);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		if (n) {
			priv = qps[top_idx]->priv;
			top_idx = iowait_priority_update_top(wait,
							     &priv->s_iowait,
							     n, top_idx);
		}

		/* refcount held until actual wake up */
		qps[n++] = qp;
	}
	/*
	 * If there had been waiters and there are more
	 * ensure that we redo the force to avoid a potential hang.
	 */
	if (n) {
		hfi1_sc_wantpiobuf_intr(sc, 0);
		if (!list_empty(list))
			hfi1_sc_wantpiobuf_intr(sc, 1);
	}
	write_sequnlock_irqrestore(&sc->waitlock, flags);

	/* Wake up the top-priority one first */
	if (n)
		hfi1_qp_wakeup(qps[top_idx],
			       RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
	for (i = 0; i < n; i++)
		if (i != top_idx)
			hfi1_qp_wakeup(qps[i],
				       RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
/* translate a send credit update to a bit code of reasons */
static inline int fill_code(u64 hw_free)
{
	int code = 0;

	if (hw_free & CR_STATUS_SMASK)
		code |= PRC_STATUS_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
		code |= PRC_PBC;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
		code |= PRC_THRESHOLD;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
		code |= PRC_FILL_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
		code |= PRC_SC_DISABLE;
	return code;
}
/* use the jiffies compare to get the wrap right */
#define sent_before(a, b) time_before(a, b)	/* a < b */
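/*
 * Wrap-around example (illustrative, not from the original source):
 * the free/sent_at counters increase without bound in an unsigned long,
 * so a direct comparison breaks at the wrap.  time_before() compares
 * the signed difference instead: with a == ULONG_MAX - 15 and b == 16,
 * (long)(a - b) is negative, so sent_before(a, b) is true even though
 * a > b numerically.
 */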
/*
 * The send context buffer "releaser".
 */
void sc_release_update(struct send_context *sc)
{
	struct pio_buf *pbuf;
	u64 hw_free;
	u32 head, tail;
	unsigned long old_free;
	unsigned long free;
	unsigned long extra;
	unsigned long flags;
	int code;

	if (!sc)
		return;

	spin_lock_irqsave(&sc->release_lock, flags);
	/* update free */
	hw_free = le64_to_cpu(*sc->hw_free);		/* volatile read */
	old_free = sc->free;
	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
			- (old_free & CR_COUNTER_MASK))
				& CR_COUNTER_MASK;
	free = old_free + extra;
	trace_hfi1_piofree(sc, extra);

	/* call sent buffer callbacks */
	code = -1;			/* code not yet set */
	head = READ_ONCE(sc->sr_head);	/* snapshot the head */
	tail = sc->sr_tail;
	while (head != tail) {
		pbuf = &sc->sr[tail].pbuf;

		if (sent_before(free, pbuf->sent_at)) {
			/* not sent yet */
			break;
		}
		if (pbuf->cb) {
			if (code < 0) /* fill in code on first user */
				code = fill_code(hw_free);
			(*pbuf->cb)(pbuf->arg, code);
		}

		tail++;
		if (tail >= sc->sr_size)
			tail = 0;
	}
	sc->sr_tail = tail;
	/* make sure tail is updated before free */
	smp_wmb();
	sc->free = free;
	spin_unlock_irqrestore(&sc->release_lock, flags);
	sc_piobufavail(sc);
}
/*
 * Send context group releaser.  Argument is the send context that caused
 * the interrupt.  Called from the send context interrupt handler.
 *
 * Call release on all contexts in the group.
 *
 * This routine takes the sc_lock without an irqsave because it is only
 * called from an interrupt handler.  Adjust if that changes.
 */
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
{
	struct send_context *sc;
	u32 sw_index;
	u32 gc, gc_end;

	spin_lock(&dd->sc_lock);
	sw_index = dd->hw_to_sw[hw_context];
	if (unlikely(sw_index >= dd->num_send_contexts)) {
		dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
			   __func__, hw_context, sw_index);
		goto done;
	}
	sc = dd->send_contexts[sw_index].sc;
	if (unlikely(!sc))
		goto done;

	gc = group_context(hw_context, sc->group);
	gc_end = gc + group_size(sc->group);
	for (; gc < gc_end; gc++) {
		sw_index = dd->hw_to_sw[gc];
		if (unlikely(sw_index >= dd->num_send_contexts)) {
			dd_dev_err(dd,
				   "%s: invalid hw (%u) to sw (%u) mapping\n",
				   __func__, hw_context, sw_index);
			continue;
		}
		sc_release_update(dd->send_contexts[sw_index].sc);
	}
done:
	spin_unlock(&dd->sc_lock);
}
/*
 * pio_select_send_context_vl() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns a send context based on the selector and a vl.
 * The mapping fields are protected by RCU
 */
struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
						u32 selector, u8 vl)
{
	struct pio_vl_map *m;
	struct pio_map_elem *e;
	struct send_context *rval;

	/*
	 * NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return VL0's send context below
	 */
	if (unlikely(vl >= num_vls)) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->pio_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return dd->vld[0].sc;
	}
	e = m->map[vl & m->mask];
	rval = e->ksc[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? dd->vld[0].sc : rval;
	return rval;
}
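/*
 * Selection example (illustrative, not from the original source): with
 * 4 VLs (m->mask == 3) and 4 kernel contexts assigned to a VL
 * (e->mask == 3), a QP whose selector is 13 on vl 2 uses
 * m->map[2]->ksc[13 & 3], i.e. the second context of that VL's set.
 */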
/*
 * pio_select_send_context_sc() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns a send context based on the selector and an sc
 */
struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
						u32 selector, u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return pio_select_send_context_vl(dd, selector, vl);
}
/*
 * Free the indicated map struct
 */
static void pio_map_free(struct pio_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}
/*
 * Handle RCU callback
 */
static void pio_map_rcu_callback(struct rcu_head *list)
{
	struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);

	pio_map_free(m);
}
/*
 * Set credit return threshold for the kernel send context
 */
static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
{
	u32 thres;

	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
					    50),
		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
					dd->vld[i].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
}
/*
 * pio_map_init - called when #vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_scontexts: per vl send context mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_scontexts is used to specify a non-uniform vl/send context
 * loading.  NULL implies auto computing the loading and giving each
 * VL a uniform distribution of send contexts per VL.
 *
 * The auto algorithm computes the sc_per_vl and the number of extra
 * send contexts.  Any extra send contexts are added from the last VL
 * on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_send_contexts are non-power of 2, the
 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
 * rounded up to the next highest power of 2 and the first entry is
 * reused in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is not
 * changed.
 */
int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
{
	int i, j;
	int extra, sc_per_vl;
	int scontext = 1;
	int num_kernel_send_contexts = 0;
	u8 lvl_scontexts[OPA_MAX_VLS];
	struct pio_vl_map *oldmap, *newmap;

	if (!vl_scontexts) {
		for (i = 0; i < dd->num_send_contexts; i++)
			if (dd->send_contexts[i].type == SC_KERNEL)
				num_kernel_send_contexts++;
		/* truncate divide */
		sc_per_vl = num_kernel_send_contexts / num_vls;
		/* extras */
		extra = num_kernel_send_contexts % num_vls;
		vl_scontexts = lvl_scontexts;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
	}
	/* build new map */
	newmap = kzalloc(struct_size(newmap, map, roundup_pow_of_two(num_vls)),
			 GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_scontext = scontext;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_scontexts[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(struct_size(newmap->map[i],
							     ksc, sz),
						 GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/*
			 * assign send contexts and
			 * adjust credit return threshold
			 */
			for (j = 0; j < sz; j++) {
				if (dd->kernel_send_context[scontext]) {
					newmap->map[i]->ksc[j] =
					dd->kernel_send_context[scontext];
					set_threshold(dd, scontext, i);
				}
				if (++scontext >= first_scontext +
						  vl_scontexts[i])
					/* wrap back to first send context */
					scontext = first_scontext;
			}
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		scontext = first_scontext + vl_scontexts[i];
	}
1939 spin_lock_irq(&dd
->pio_map_lock
);
1940 oldmap
= rcu_dereference_protected(dd
->pio_map
,
1941 lockdep_is_held(&dd
->pio_map_lock
));
1943 /* publish newmap */
1944 rcu_assign_pointer(dd
->pio_map
, newmap
);
1946 spin_unlock_irq(&dd
->pio_map_lock
);
1947 /* success, free any old map after grace period */
1949 call_rcu(&oldmap
->list
, pio_map_rcu_callback
);
1952 /* free any partial allocation */
1953 pio_map_free(newmap
);
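/*
 * Distribution example (illustrative, not from the original source):
 * with 16 kernel send contexts and num_vls == 5, the auto computation
 * gives sc_per_vl == 3 and extra == 1, so vl_scontexts becomes
 * { 3, 3, 3, 3, 4 } -- the extra context is added from the last VL
 * down.  num_vls is then rounded up to 8 map entries; entries 5..7
 * simply re-use map[i % 5] rather than allocating again.
 */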
void free_pio_map(struct hfi1_devdata *dd)
{
	/* Free PIO map if allocated */
	if (rcu_access_pointer(dd->pio_map)) {
		spin_lock_irq(&dd->pio_map_lock);
		pio_map_free(rcu_access_pointer(dd->pio_map));
		RCU_INIT_POINTER(dd->pio_map, NULL);
		spin_unlock_irq(&dd->pio_map_lock);
		synchronize_rcu();
	}
	kfree(dd->kernel_send_context);
	dd->kernel_send_context = NULL;
}
int init_pervl_scs(struct hfi1_devdata *dd)
{
	int i;
	u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
	u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
	u32 ctxt;
	struct hfi1_pportdata *ppd = dd->pport;

	dd->vld[15].sc = sc_alloc(dd, SC_VL15,
				  dd->rcd[0]->rcvhdrqentsize, dd->node);
	if (!dd->vld[15].sc)
		return -ENOMEM;

	hfi1_init_ctxt(dd->vld[15].sc);
	dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);

	dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
					       sizeof(struct send_context *),
					       GFP_KERNEL, dd->node);
	if (!dd->kernel_send_context)
		goto freesc15;

	dd->kernel_send_context[0] = dd->vld[15].sc;
	for (i = 0; i < num_vls; i++) {
		/*
		 * Since this function does not deal with a specific
		 * receive context but we need the RcvHdrQ entry size,
		 * use the size from rcd[0].  It is guaranteed to be
		 * valid at this point and will remain the same for all
		 * receive contexts.
		 */
		dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
					 dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->vld[i].sc)
			goto nomem;
		dd->kernel_send_context[i + 1] = dd->vld[i].sc;
		hfi1_init_ctxt(dd->vld[i].sc);
		/* non VL15 start with the max MTU */
		dd->vld[i].mtu = hfi1_max_mtu;
	}
	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
		dd->kernel_send_context[i + 1] =
		sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->kernel_send_context[i + 1])
			goto nomem;
		hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
	}
	sc_enable(dd->vld[15].sc);
	ctxt = dd->vld[15].sc->hw_context;
	mask = all_vl_mask & ~(1LL << 15);
	write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	dd_dev_info(dd,
		    "Using send context %u(%u) for VL15\n",
		    dd->vld[15].sc->sw_index, ctxt);

	for (i = 0; i < num_vls; i++) {
		sc_enable(dd->vld[i].sc);
		ctxt = dd->vld[i].sc->hw_context;
		mask = all_vl_mask & ~(data_vls_mask);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}
	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
		sc_enable(dd->kernel_send_context[i + 1]);
		ctxt = dd->kernel_send_context[i + 1]->hw_context;
		mask = all_vl_mask & ~(data_vls_mask);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}

	if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
		goto nomem;
	return 0;
nomem:
	for (i = 0; i < num_vls; i++) {
		sc_free(dd->vld[i].sc);
		dd->vld[i].sc = NULL;
	}

	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
		sc_free(dd->kernel_send_context[i + 1]);

	kfree(dd->kernel_send_context);
	dd->kernel_send_context = NULL;

freesc15:
	sc_free(dd->vld[15].sc);
	return -ENOMEM;
}
int init_credit_return(struct hfi1_devdata *dd)
{
	int ret;
	int i;

	dd->cr_base = kcalloc(
		node_affinity.num_possible_nodes,
		sizeof(struct credit_return_base),
		GFP_KERNEL);
	if (!dd->cr_base) {
		ret = -ENOMEM;
		goto done;
	}
	for_each_node_with_cpus(i) {
		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);

		set_dev_node(&dd->pcidev->dev, i);
		dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
						       bytes,
						       &dd->cr_base[i].dma,
						       GFP_KERNEL);
		if (!dd->cr_base[i].va) {
			set_dev_node(&dd->pcidev->dev, dd->node);
			dd_dev_err(dd,
				   "Unable to allocate credit return DMA range for NUMA %d\n",
				   i);
			ret = -ENOMEM;
			goto free_cr_base;
		}
	}
	set_dev_node(&dd->pcidev->dev, dd->node);

	ret = 0;
done:
	return ret;

free_cr_base:
	free_credit_return(dd);
	goto done;
}
void free_credit_return(struct hfi1_devdata *dd)
{
	int i;

	if (!dd->cr_base)
		return;
	for (i = 0; i < node_affinity.num_possible_nodes; i++) {
		if (dd->cr_base[i].va) {
			dma_free_coherent(&dd->pcidev->dev,
					  TXE_NUM_CONTEXTS *
					  sizeof(struct credit_return),
					  dd->cr_base[i].va,
					  dd->cr_base[i].dma);
		}
	}
	kfree(dd->cr_base);
	dd->cr_base = NULL;
}
void seqfile_dump_sci(struct seq_file *s, u32 i,
		      struct send_context_info *sci)
{
	struct send_context *sc = sci->sc;
	u64 reg;

	seq_printf(s, "SCI %u: type %u base %u credits %u\n",
		   i, sci->type, sci->base, sci->credits);
	seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
		   sc->flags, sc->sw_index, sc->hw_context, sc->group);
	seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
		   sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
	seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
		   sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
	seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
		   sc->credit_intr_count, sc->credit_ctrl);
	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
	seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
		   (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
		   CR_COUNTER_SHIFT,
		   (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
		   SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
		   reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
}