#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
/*
 * Be aware that the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4
/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG       BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
        ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
        (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
        ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
        (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
        ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
        (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
        ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
        (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
        ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
        (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
        ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
        (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
        ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
        (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    BIT_ULL(0)
enum sdma_states {
        sdma_state_s00_hw_down,
        sdma_state_s10_hw_start_up_halt_wait,
        sdma_state_s15_hw_start_up_clean_wait,
        sdma_state_s20_idle,
        sdma_state_s30_sw_clean_up_wait,
        sdma_state_s40_hw_clean_up_wait,
        sdma_state_s50_hw_halt_wait,
        sdma_state_s60_idle_halt_wait,
        sdma_state_s80_hw_freeze,
        sdma_state_s82_freeze_sw_clean,
        sdma_state_s99_running,
};

enum sdma_events {
        sdma_event_e00_go_hw_down,
        sdma_event_e10_go_hw_start,
        sdma_event_e15_hw_halt_done,
        sdma_event_e25_hw_clean_up_done,
        sdma_event_e30_go_running,
        sdma_event_e40_sw_cleaned,
        sdma_event_e50_hw_cleaned,
        sdma_event_e60_hw_halted,
        sdma_event_e70_go_idle,
        sdma_event_e80_hw_freeze,
        sdma_event_e81_hw_frozen,
        sdma_event_e82_hw_unfreeze,
        sdma_event_e85_link_down,
        sdma_event_e90_sw_halted,
};
struct sdma_set_state_action {
        unsigned op_enable:1;
        unsigned op_intenable:1;
        unsigned op_halt:1;
        unsigned op_cleanup:1;
        unsigned go_s99_running_tofalse:1;
        unsigned go_s99_running_totrue:1;
};
struct sdma_state {
        struct kref kref;
        struct completion comp;
        enum sdma_states current_state;
        unsigned current_op;
        unsigned go_s99_running;
        /* debugging/development */
        enum sdma_states previous_state;
        unsigned previous_op;
        enum sdma_events last_event;
};
/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and tear down routines to buildup
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */
/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ.  The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq.  Slabs, pre-allocated lists,
 * and dma pools can be used.  Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The user's txreq structure must be declared with the
 * struct sdma_txreq as its first member.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location.  It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls.  The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx.  Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx.  An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent().  For these memory locations, it
 * is the responsibility of the user to handle that unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added.  An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
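/*
 * Illustrative sketch of the flow above (not part of the driver API;
 * the wrapper type "struct example_txreq", its fields, and the error
 * label are hypothetical, and error handling is abbreviated):
 *
 *      struct example_txreq {
 *              struct sdma_txreq txreq;        // must be the first member
 *              // user state follows
 *      };
 *
 *      static void example_complete(struct sdma_txreq *tx, int status);
 *
 *      ret = sdma_txinit(&etx->txreq, 0, pkt_len, example_complete);
 *      if (ret)
 *              goto bail;
 *      ret = sdma_txadd_daddr(dd, &etx->txreq, hdr_dma, hdr_len);
 *      if (ret)
 *              goto bail;
 *      ret = sdma_txadd_kvaddr(dd, &etx->txreq, data_kvaddr, data_len);
 *      if (ret)
 *              goto bail;
 *      ret = sdma_send_txreq(sde, wait, &etx->txreq, pkts_sent);
 */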
/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init().  Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
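/*
 * Sketch of the bring-up order implied above (illustrative only, with
 * abbreviated error handling):
 *
 *      ret = sdma_init(dd, port);      // allocate rings and engines
 *      if (ret)
 *              goto bail;
 *      // ... enable interrupts ...
 *      sdma_start(dd);                 // interrupts must be on by now
 *
 *      // later, when the vl count changes:
 *      ret = sdma_map_init(dd, port, num_vls, vl_engines);
 */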
/**
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
        /* private: don't use directly */
        __le64 qw[2];
};
/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
        /* read mostly */
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        /* private: */
        void __iomem *tail_csr;
        u64 imask;                      /* clear interrupt mask */
        u64 idle_mask;
        /* private: */
        volatile __le64 *head_dma;      /* DMA'ed by chip */
        /* private: */
        dma_addr_t head_phys;
        /* private: */
        struct hw_sdma_desc *descq;
        /* private: */
        unsigned descq_full_count;
        struct sdma_txreq **tx_ring;
        /* private: */
        dma_addr_t descq_phys;
        /* private: */
        struct sdma_state state;
        /* private: */
        int cpu;
        /* private: */
        u8 this_idx;                    /* zero relative engine */
        /* protect changes to senddmactrl shadow */
        spinlock_t senddmactrl_lock;
        /* private: */
        u64 p_senddmactrl;              /* shadow per-engine SendDmaCtrl */

        /* read/write using tail_lock */
        spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        /* private: */
        u64 tail_sn;
#endif
        /* private: */
        u32 descq_tail;
        /* private: */
        unsigned long ahg_bits;
        /* private: */
        u16 desc_avail;
        /* private: */
        u16 descq_cnt;

        /* read/write using head_lock */
        /* private: */
        seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        /* private: */
        u64 head_sn;
#endif
        /* private: */
        u32 descq_head;
        /* private: */
        u64 progress_int_cnt;

        /* private: */
        struct list_head dmawait;

        /* CONFIG SDMA for now, just blindly duplicate */
        /* private: */
        struct tasklet_struct sdma_hw_clean_up_task
                ____cacheline_aligned_in_smp;

        /* private: */
        struct tasklet_struct sdma_sw_clean_up_task
                ____cacheline_aligned_in_smp;
        /* private: */
        struct work_struct err_halt_worker;
        /* private: */
        struct timer_list err_progress_check_timer;
        u32 progress_check_head;
        /* private: */
        struct work_struct flush_worker;
        /* protect flush list */
        spinlock_t flushlist_lock;
        /* private: */
        struct list_head flushlist;
        struct cpumask cpu_mask;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);
/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
        return sde->descq_tail == sde->descq_head;
}
static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
        return sde->descq_cnt -
                (sde->descq_tail -
                 READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
        return sde->descq_cnt - sdma_descq_freecnt(sde);
}
/*
 * Either head_lock or tail_lock is required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
        return engine->state.current_state == sdma_state_s99_running;
}
/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 */
static inline int sdma_running(struct sdma_engine *engine)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->tail_lock, flags);
        ret = __sdma_running(engine);
        spin_unlock_irqrestore(&engine->tail_lock, flags);
        return ret;
}
void _sdma_txreq_ahgadd(
        struct sdma_txreq *tx,
        u8 num_ahg,
        u8 ahg_entry,
        u32 *ahg,
        u8 ahg_hlen);
/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: ahg descriptor for first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 * @cb: callback to call upon completion
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user-independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency-sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry.  SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.  The callback will be provided this tx, a status, and
 * a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait structure had been used, indicates that the
 * iowait sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise.  The sdma_txadd_*
 * entrances will pad with a descriptor that references 1 - 3 bytes when
 * the number of bytes specified in tlen have been supplied to the
 * sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header.  This is for cases where the stored header is
 * larger than the header to be used in a packet.  This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 *
 * Return:
 * 0 - success, -ENODATA - zero length tlen, -EMSGSIZE - tlen too large
 */
static inline int sdma_txinit_ahg(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        u8 ahg_entry,
        u8 num_ahg,
        u32 *ahg,
        u8 ahg_hlen,
        void (*cb)(struct sdma_txreq *, int))
{
        if (tlen == 0)
                return -ENODATA;
        if (tlen > MAX_SDMA_PKT_SIZE)
                return -EMSGSIZE;
        tx->desc_limit = ARRAY_SIZE(tx->descs);
        tx->descp = &tx->descs[0];
        INIT_LIST_HEAD(&tx->list);
        tx->num_desc = 0;
        tx->flags = flags;
        tx->complete = cb;
        tx->coalesce_buf = NULL;
        tx->wait = NULL;
        tx->packet_len = tlen;
        tx->tlen = tx->packet_len;
        tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
        tx->descs[0].qw[1] = 0;
        if (flags & SDMA_TXREQ_F_AHG_COPY)
                tx->descs[0].qw[1] |=
                        (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
                                << SDMA_DESC1_HEADER_INDEX_SHIFT) |
                        (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
                                << SDMA_DESC1_HEADER_MODE_SHIFT);
        else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
                _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
        return 0;
}
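/*
 * Example (illustrative sketch): initializing a txreq that applies AHG
 * updates.  The dword indexes, field widths, and values below are
 * hypothetical, as are "ahg_index", "pkt_len", "hdr_len", and
 * "example_complete":
 *
 *      u32 ahg[2];
 *
 *      ahg[0] = sdma_build_ahg_descriptor(new_field, 2, 16, 12);
 *      ahg[1] = sdma_build_ahg_descriptor(new_psn, 3, 0, 8);
 *      ret = sdma_txinit_ahg(&etx->txreq, SDMA_TXREQ_F_USE_AHG, pkt_len,
 *                            ahg_index, 2, ahg, hdr_len, example_complete);
 */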
/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency-sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  The head size of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status.  The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 */
static inline int sdma_txinit(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        void (*cb)(struct sdma_txreq *, int))
{
        return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
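/*
 * Example completion callback (sketch): container_of() recovers the user
 * wrapper.  "struct example_txreq", its dd field, and the helpers
 * "etx_handle_error"/"etx_free" are hypothetical:
 *
 *      static void example_complete(struct sdma_txreq *tx, int status)
 *      {
 *              struct example_txreq *etx =
 *                      container_of(tx, struct example_txreq, txreq);
 *
 *              // ISR/tasklet/thread context: no sleeping calls here
 *              if (status != SDMA_TXREQ_S_OK)
 *                      etx_handle_error(etx, status);
 *              sdma_txclean(etx->dd, tx);      // release automatic mappings
 *              etx_free(etx);
 *      }
 */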
/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
        return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
                >> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
        return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
                >> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
        return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
                >> SDMA_DESC0_PHY_ADDR_SHIFT;
}
static inline void make_tx_sdma_desc(
        struct sdma_txreq *tx,
        int type,
        dma_addr_t addr,
        size_t len)
{
        struct sdma_desc *desc = &tx->descp[tx->num_desc];

        if (!tx->num_desc) {
                /* qw[0] zero; qw[1] first, ahg mode already in from init */
                desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
                                << SDMA_DESC1_GENERATION_SHIFT;
        } else {
                desc->qw[0] = 0;
                desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
                                << SDMA_DESC1_GENERATION_SHIFT;
        }
        desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
                                << SDMA_DESC0_PHY_ADDR_SHIFT) |
                        (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
                                << SDMA_DESC0_BYTE_COUNT_SHIFT);
}
/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
                           int type, void *kvaddr, struct page *page,
                           unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx);
void __sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx);
static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
        if (tx->num_desc)
                __sdma_txclean(dd, tx);
}
/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
                                  struct sdma_txreq *tx)
{
        tx->descp[tx->num_desc].qw[0] |=
                SDMA_DESC0_LAST_DESC_FLAG;
        tx->descp[tx->num_desc].qw[1] |=
                dd->default_desc1;
        if (tx->flags & SDMA_TXREQ_F_URGENT)
                tx->descp[tx->num_desc].qw[1] |=
                        (SDMA_DESC1_HEAD_TO_HOST_FLAG |
                         SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
        struct hfi1_devdata *dd,
        int type,
        struct sdma_txreq *tx,
        dma_addr_t addr,
        u16 len)
{
        int rval = 0;

        make_tx_sdma_desc(
                tx,
                type,
                addr, len);
        WARN_ON(len > tx->tlen);
        tx->tlen -= len;
        /* special cases for last */
        if (!tx->tlen) {
                if (tx->packet_len & (sizeof(u32) - 1)) {
                        rval = _pad_sdma_tx_descs(dd, tx);
                        if (rval)
                                return rval;
                } else {
                        _sdma_close_tx(dd, tx);
                }
        }
        tx->num_desc++;
        return rval;
}
/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        struct page *page,
        unsigned long offset,
        u16 len)
{
        dma_addr_t addr;
        int rval;

        if ((unlikely(tx->num_desc == tx->desc_limit))) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
                                              NULL, page, offset, len);
                if (rval <= 0)
                        return rval;
        }

        addr = dma_map_page(
                       &dd->pcidev->dev,
                       page,
                       offset,
                       len,
                       DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
                __sdma_txclean(dd, tx);
                return -ENOSPC;
        }

        return _sdma_txadd_daddr(
                        dd, SDMA_MAP_PAGE, tx, addr, len);
}
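/*
 * Usage sketch: on failure the txadd routines clean the txreq's own
 * mappings before returning, so the caller only unwinds its own state
 * ("free_etx" is a hypothetical label):
 *
 *      ret = sdma_txadd_page(dd, &etx->txreq, page, offset, len);
 *      if (ret)
 *              // -ENOSPC: dma mapping failed
 *              // -ENOMEM: couldn't extend/coalesce the descriptor array
 *              goto free_etx;
 */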
/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing for
 * this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        dma_addr_t addr,
        u16 len)
{
        int rval;

        if ((unlikely(tx->num_desc == tx->desc_limit))) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
                                              NULL, NULL, 0, 0);
                if (rval <= 0)
                        return rval;
        }

        return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}
/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        void *kvaddr,
        u16 len)
{
        dma_addr_t addr;
        int rval;

        if ((unlikely(tx->num_desc == tx->desc_limit))) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
                                              kvaddr, NULL, 0, len);
                if (rval <= 0)
                        return rval;
        }

        addr = dma_map_single(
                       &dd->pcidev->dev,
                       kvaddr,
                       len,
                       DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
                __sdma_txclean(dd, tx);
                return -ENOSPC;
        }

        return _sdma_txadd_daddr(
                        dd, SDMA_MAP_SINGLE, tx, addr, len);
}
int sdma_send_txreq(struct sdma_engine *sde,
                    struct iowait_work *wait,
                    struct sdma_txreq *tx,
                    bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
                     struct iowait_work *wait,
                     struct list_head *tx_list,
                     u16 *count_out);
int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: AHG value to write
 * @dwindex: dword index within the stored header
 * @startbit: start bit of the field within the dword
 * @bits: field length in bits
 *
 * Build and return a 32 bit descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
        u16 data,
        u8 dwindex,
        u8 startbit,
        u8 bits)
{
        return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
                ((startbit & SDMA_AHG_FIELD_START_MASK) <<
                SDMA_AHG_FIELD_START_SHIFT) |
                ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
                SDMA_AHG_FIELD_LEN_SHIFT) |
                ((dwindex & SDMA_AHG_INDEX_MASK) <<
                SDMA_AHG_INDEX_SHIFT) |
                ((data & SDMA_AHG_VALUE_MASK) <<
                SDMA_AHG_VALUE_SHIFT));
}
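/*
 * For example (sketch): an update that writes 0x123 into an 8-bit field
 * starting at bit 16 of header dword 7 packs, per the masks and shifts
 * defined above, as:
 *
 *      desc = sdma_build_ahg_descriptor(0x123, 7, 16, 8);
 *      // desc == (1 << 31) | (16 << 24) | (8 << 20) | (7 << 16) | 0x123
 */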
/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress.  This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
                                     struct sdma_txreq *tx)
{
        if (read_seqretry(&sde->head_lock, seq)) {
                sde->desc_avail = sdma_descq_freecnt(sde);
                if (tx->num_desc > sde->desc_avail)
                        return 0;
                return 1;
        }
        return 0;
}
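/*
 * Typical use in a send-side sleep routine (sketch): sample the head
 * seqcount before deciding to block, then let sdma_progress() report
 * whether the ring has already moved enough to retry:
 *
 *      seq = raw_seqcount_begin(&sde->head_lock.seqcount);
 *      if (sdma_progress(sde, seq, tx))
 *              return -EAGAIN;         // ring progressed; retry submit
 *      // otherwise queue the iowait and sleep until a wakeup
 */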
/**
 * sdma_iowait_schedule() - schedule the iowait for progress
 * @sde: sdma_engine to schedule
 * @wait: wait struct to schedule
 *
 * This function schedules the iowait
 * structure embedded in the QP or PQ.
 */
static inline void sdma_iowait_schedule(
        struct sdma_engine *sde,
        struct iowait *wait)
{
        struct hfi1_pportdata *ppd = sde->dd->pport;

        iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}
/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
/*
 * The diagram below details the relationship of the mapping structures
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *               dd->sdma_map
 *                    |                                   sdma_map_elem[0]
 *                    |                                +--------------------+
 *                    v                                |       mask         |
 *               sdma_vl_map                           |--------------------|
 *      +--------------------------+                   | sde[0] -> eng 1    |
 *      |    list (RCU)            |                   |--------------------|
 *      |--------------------------|                 ->| sde[1] -> eng 2    |
 *      |    mask                  |              --/  |--------------------|
 *      |--------------------------|            -/     |        *           |
 *      |    actual_vls (max 8)    |          -/       |--------------------|
 *      |--------------------------|       --/         | sde[n-1] -> eng n  |
 *      |    vls (max 8)           |     -/            +--------------------+
 *      |--------------------------|  --/
 *      |    map[0]                |-/
 *      |--------------------------|                    sdma_map_elem[1]
 *      |    map[1]                |---                +---------------------+
 *      |--------------------------|   \----           |        mask         |
 *      |       *                  |        \--        |---------------------|
 *      |       *                  |           \----   | sde[0] -> eng 1+n   |
 *      |       *                  |                \  |---------------------|
 *      |--------------------------|                 \>| sde[1] -> eng 2+n   |
 *      |   map[vls - 1]           |-                  |---------------------|
 *      +--------------------------+  \-               |         *           |
 *                                      \-             |---------------------|
 *                                        \            | sde[m-1] -> eng m+n |
 *                                         \-          +---------------------+
 *                                           \
 *                                            \-        sdma_map_elem[vls-1]
 *                                              \-     +----------------------+
 *                                                \--  |         mask         |
 *                                                   \ |----------------------|
 *                                                    \| sde[0] -> eng 1+m+n  |
 *                                                     |----------------------|
 *                                                    >| sde[1] -> eng 2+m+n  |
 *                                                     |----------------------|
 *                                                     |          *           |
 *                                                     |----------------------|
 *                                                     | sde[o-1] -> eng o+m+n|
 *                                                     +----------------------+
 */
/**
 * struct sdma_map_elem - mapping for a vl
 * @mask: selector mask
 * @sde: array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
        u32 mask;
        struct sdma_engine *sde[];
};
/**
 * struct sdma_vl_map - mapping for a vl
 * @engine_to_vl: map of an engine to a vl
 * @list: rcu head for free callback
 * @mask: vl mask to "mod" the vl to produce an index to map array
 * @actual_vls: number of vls
 * @vls: number of vls rounded to next power of 2
 * @map: array of sdma_map_elem entries
 *
 * This is the parent mapping structure.  The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
        s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
        struct rcu_head list;
        u32 mask;
        u8 actual_vls;
        u8 vls;
        struct sdma_map_elem *map[];
};
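/*
 * Engine selection walks the two mask levels described above.  Sketch of
 * the computation (the real lookup in sdma.c also holds rcu_read_lock()
 * and honors the engine_to_vl override):
 *
 *      m = rcu_dereference(dd->sdma_map);
 *      e = m->map[vl & m->mask];               // per-vl element
 *      sde = e->sde[selector & e->mask];       // engine within that vl
 */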
int sdma_map_init(
        struct hfi1_devdata *dd,
        u8 port,
        u8 num_vls,
        u8 *vl_engines);
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 */
static inline void sdma_engine_progress_schedule(
        struct sdma_engine *sde)
{
        if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
                return;
        _sdma_engine_progress_schedule(sde);
}
struct sdma_engine *sdma_select_engine_sc(
        struct hfi1_devdata *dd,
        u32 selector,
        u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
        struct hfi1_devdata *dd,
        u32 selector,
        u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
                                            u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
                                size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
                                unsigned long cpuid);
#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *sde);
#endif

static inline char *slashstrip(char *s)
{
        char *r = s;

        while (*s)
                if (*s++ == '/')
                        r = s;
        return r;
}
u16 sdma_get_descq_cnt(void);
extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif /* _HFI1_SDMA_H */
);