/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "sdma_txreq.h"
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3

#define SDMA_TXREQ_F_URGENT       0x0001
#define SDMA_TXREQ_F_AHG_COPY     0x0002
#define SDMA_TXREQ_F_USE_AHG      0x0004

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
/*
 * Be aware the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4
/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG	BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG	BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT	48
#define SDMA_DESC0_BYTE_COUNT_WIDTH	14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT	0
#define SDMA_DESC0_PHY_ADDR_WIDTH	48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT	32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH	32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT	13
#define SDMA_DESC1_HEADER_MODE_WIDTH	3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT	8
#define SDMA_DESC1_HEADER_INDEX_WIDTH	5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT	4
#define SDMA_DESC1_HEADER_DWS_WIDTH	4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT	2
#define SDMA_DESC1_GENERATION_WIDTH	2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG		BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG	BIT_ULL(0)
enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};
enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};
struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct completion comp;
	enum sdma_states current_state;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};
/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and tear down routines to build up
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */
/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ.  The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq.  Slabs, pre-allocated lists,
 * and dma pools can be used.  Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The txreq must be declared with the sdma_txreq first.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location.  It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls.  The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx.  Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx.  An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent().  For these memory locations, it
 * is the responsibility of the user to handle the unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added.  An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
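/*
 * Example (illustrative sketch only, not part of the original header): a
 * minimal submission path following the flow described above.  The structure
 * "struct verbs_txreq", the dd/sde/iowait variables, and the helpers
 * handle_error()/free_vtx() are hypothetical names used for illustration;
 * error handling is abbreviated.
 *
 *	struct verbs_txreq {
 *		struct sdma_txreq txreq;	// must be the first member
 *		// ... user private state ...
 *	};
 *
 *	static void vtx_complete(struct sdma_txreq *tx, int status)
 *	{
 *		// may run from ISR/tasklet context: no sleeping calls here
 *		if (status != SDMA_TXREQ_S_OK)
 *			handle_error(tx, status);
 *		free_vtx(container_of(tx, struct verbs_txreq, txreq));
 *	}
 *
 *	// hdr_len includes the pbc in this sketch
 *	ret = sdma_txinit(&vtx->txreq, 0, hdr_len + data_len, vtx_complete);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &vtx->txreq, hdr, hdr_len);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &vtx->txreq, page, offset, data_len);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, &priv->s_iowait, &vtx->txreq);
 *	if (ret)
 *		sdma_txclean(dd, &vtx->txreq);	// release any mappings taken
 */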
/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init().  Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
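/*
 * Illustrative bring-up order (a sketch, not taken from this header; the
 * surrounding driver context and error handling are omitted):
 *
 *	ret = sdma_init(dd, port);	// allocate rings and program CSRs
 *	if (ret)
 *		goto bail;
 *	// ... request/enable interrupts here: the state machine needs them ...
 *	sdma_start(dd);			// kick every initialized engine
 */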
/**
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};
/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */
	volatile __le64 *head_dma;	/* DMA'ed by chip */
	dma_addr_t head_phys;
	struct hw_sdma_desc *descq;
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	dma_addr_t descq_phys;
	struct sdma_state state;
	u8 this_idx;			/* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	u64 p_senddmactrl;		/* shadow per-engine SendDmaCtrl */

	/* read/write using tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 tail_sn;
#endif
	unsigned long ahg_bits;

	/* read/write using head_lock */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 head_sn;
#endif
	u64 progress_int_cnt;

	struct list_head dmawait;

	/* CONFIG SDMA for now, just blindly duplicate */
	struct tasklet_struct sdma_hw_clean_up_task
				____cacheline_aligned_in_smp;
	struct tasklet_struct sdma_sw_clean_up_task
				____cacheline_aligned_in_smp;
	struct work_struct err_halt_worker;
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	struct work_struct flush_worker;
	/* protect flush list */
	spinlock_t flushlist_lock;
	struct list_head flushlist;
	struct cpumask cpu_mask;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);
/**
 * sdma_empty() - idle engine test
 * @engine: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}
static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(READ_ONCE(sde->descq_tail) -
		 READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}
/*
 * Either head_lock or tail_lock required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}
/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);
/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: ahg descriptor for first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user-independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry.  SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.  The callback will be provided this tx, a status, and a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait had been used, indicates that the iowait
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise.  The sdma_txadd_* entrances
 * will pad with a descriptor that references 1 - 3 bytes when the number of
 * bytes specified in tlen have been supplied to the sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header.  This is for cases where the stored header is
 * larger than the header to be used in a packet.  This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int))
{
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->coalesce_buf = NULL;
	tx->packet_len = tlen;
	tx->tlen = tx->packet_len;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}
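/*
 * Example (sketch with hypothetical names, not from the original header):
 * priming an allocated AHG entry by copying the header carried in the first
 * descriptor.  Later packets can then be submitted with SDMA_TXREQ_F_USE_AHG
 * plus a small array of update descriptors instead of a full header.
 *
 *	ahg_idx = sdma_ahg_alloc(sde);
 *	if (ahg_idx < 0) {
 *		// no free entry: fall back to a plain sdma_txinit() submission
 *		return do_plain_submit();
 *	}
 *	ret = sdma_txinit_ahg(&ptx->txreq, SDMA_TXREQ_F_AHG_COPY,
 *			      pktlen, ahg_idx, 0, NULL, 0, complete_cb);
 */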
/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  The head of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status.  The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}
static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* qw[0] zero; qw[1] first, ahg mode already in from init */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
				<< SDMA_DESC0_PHY_ADDR_SHIFT) |
			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
				<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}
/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);

static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	if (tx->num_desc)
		__sdma_txclean(dd, tx);
}
/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	make_tx_sdma_desc(tx, type, addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1)) {
			rval = _pad_sdma_tx_descs(dd, tx);
			if (rval)
				return rval;
		} else {
			_sdma_close_tx(dd, tx);
		}
	}
	tx->num_desc++;
	return rval;
}
/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
					      NULL, page, offset, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_page(
		       &dd->pcidev->dev,
		       page,
		       offset,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_PAGE, tx, addr, len);
}
/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing for
 * this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, 0);
		if (rval <= 0)
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}
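/*
 * Example (sketch; the pool and length names are hypothetical): adding a
 * header that was allocated from a dma pool.  It is already mapped, so the
 * caller, not the SDMA layer, remains responsible for eventually
 * freeing/unmapping it.
 *
 *	hdr = dma_pool_alloc(pq->header_pool, GFP_KERNEL, &hdr_dma);
 *	if (!hdr)
 *		return -ENOMEM;
 *	ret = sdma_txadd_daddr(dd, &ptx->txreq, hdr_dma, hdr_len);
 */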
/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_single(
		       &dd->pcidev->dev,
		       kvaddr,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_SINGLE, tx, addr, len);
}
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: value to place in the updated field
 * @dwindex: index of the header dword to update
 * @startbit: start bit of the field within the dword
 * @bits: field length in bits
 *
 * Build and return a 32 bit descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
	u16 data,
	u8 dwindex,
	u8 startbit,
	u8 bits)
{
	return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
		     ((startbit & SDMA_AHG_FIELD_START_MASK) <<
		      SDMA_AHG_FIELD_START_SHIFT) |
		     ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
		      SDMA_AHG_FIELD_LEN_SHIFT) |
		     ((dwindex & SDMA_AHG_INDEX_MASK) <<
		      SDMA_AHG_INDEX_SHIFT) |
		     ((data & SDMA_AHG_VALUE_MASK) <<
		      SDMA_AHG_VALUE_SHIFT));
}
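/*
 * For reference, the layout of the 32-bit update word produced above
 * (restating the SDMA_AHG_* shift/mask definitions, nothing new):
 *
 *	[31]      update enable
 *	[28:24]   start bit of the field within the selected dword
 *	[23:20]   field length in bits
 *	[19:16]   index of the header dword to update
 *	[15:0]    replacement value
 */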
/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress.  This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
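/*
 * Typical call pattern (sketch; the surrounding sleep/queueing logic is
 * driver specific and the variable names are illustrative):
 *
 *	seq = read_seqbegin(&sde->head_lock);
 *	// ... decide to block and queue the iowait ...
 *	if (sdma_progress(sde, seq, tx)) {
 *		// the ring advanced while we were queuing: don't sleep,
 *		// retry the submission instead
 *	}
 */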
/**
 * sdma_iowait_schedule() - initialize wait structure
 * @sde: sdma_engine to schedule
 * @wait: wait struct to schedule
 *
 * This function initializes the iowait
 * structure embedded in the QP or PQ.
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	struct hfi1_pportdata *ppd = sde->dd->pport;

	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}
/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
/*
 * The diagram below details the relationship of the mapping structures
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *      dd->sdma_map
 *           |
 *           v
 *       sdma_vl_map
 *      +--------------------------+
 *      |    list (RCU)            |
 *      |    mask                  |
 *      |    actual_vls (max 8)    |
 *      |    vls (max 8)           |
 *      |    map[0]                |---->  sdma_map_elem[0]
 *      |    map[1]                |---->  sdma_map_elem[1]
 *      |       ...                |
 *      |    map[vls - 1]          |---->  sdma_map_elem[vls - 1]
 *      +--------------------------+
 *
 *       sdma_map_elem[0]
 *      +----------------------+
 *      | mask                 |
 *      | sde[0]   -> eng 1    |
 *      | sde[1]   -> eng 2    |
 *      |   ...                |
 *      | sde[n-1] -> eng n    |
 *      +----------------------+
 *
 *       sdma_map_elem[1]
 *      +----------------------+
 *      | mask                 |
 *      | sde[0]   -> eng 1+n  |
 *      | sde[1]   -> eng 2+n  |
 *      |   ...                |
 *      | sde[m-1] -> eng m+n  |
 *      +----------------------+
 *
 *       sdma_map_elem[vls - 1]
 *      +-----------------------+
 *      | mask                  |
 *      | sde[0]   -> eng 1+m+n |
 *      | sde[1]   -> eng 2+m+n |
 *      |   ...                 |
 *      | sde[o-1] -> eng o+m+n |
 *      +-----------------------+
 */
/**
 * struct sdma_map_elem - mapping for a vl
 * @mask - selector mask
 * @sde - array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};
/**
 * struct sdma_vl_map - mapping for a vl
 * @engine_to_vl - map of an engine to a vl
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index to map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of sdma_map_elem entries
 *
 * This is the parent mapping structure.  The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};
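/*
 * Illustrative two-level lookup (sketch only; the sdma_select_engine_*()
 * helpers below are the real accessors and also handle RCU and fallbacks):
 *
 *	rcu_read_lock();
 *	m = rcu_dereference(dd->sdma_map);
 *	e = m->map[vl & m->mask];		// per-vl element
 *	sde = e->sde[selector & e->mask];	// engine within that vl
 *	rcu_read_unlock();
 */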
int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);
/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
				unsigned long cpuid);
#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
#endif

static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);