/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
24 #include <linux/dmaengine.h>
/* module tunables (defined in the v2 core) */
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
/* referenced by ioat_get_max_alloc_order() below; declaration was missing */
extern int ioat_ring_max_alloc_order;

/* workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

/* hard cap on log2 of the descriptor ring size (2^16 entries) */
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
44 /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
45 * @base: common ioat channel parameters
46 * @xfercap_log; log2 of channel max transfer length (for fast division)
47 * @head: allocated index
48 * @issued: hardware notification point
49 * @tail: cleanup index
50 * @pending: lock free indicator for issued != head
51 * @dmacount: identical to 'head' except for occasionally resetting to zero
52 * @alloc_order: log2 of the number of allocated descriptors
53 * @ring: software ring buffer implementation of hardware ring
54 * @ring_lock: protects ring attributes
56 struct ioat2_dma_chan
{
57 struct ioat_chan_common base
;
65 struct ioat_ring_ent
**ring
;
69 static inline struct ioat2_dma_chan
*to_ioat2_chan(struct dma_chan
*c
)
71 struct ioat_chan_common
*chan
= to_chan_common(c
);
73 return container_of(chan
, struct ioat2_dma_chan
, base
);
76 static inline u16
ioat2_ring_mask(struct ioat2_dma_chan
*ioat
)
78 return (1 << ioat
->alloc_order
) - 1;
81 /* count of descriptors in flight with the engine */
82 static inline u16
ioat2_ring_active(struct ioat2_dma_chan
*ioat
)
84 return (ioat
->head
- ioat
->tail
) & ioat2_ring_mask(ioat
);
87 /* count of descriptors pending submission to hardware */
88 static inline u16
ioat2_ring_pending(struct ioat2_dma_chan
*ioat
)
90 return (ioat
->head
- ioat
->issued
) & ioat2_ring_mask(ioat
);
93 static inline u16
ioat2_ring_space(struct ioat2_dma_chan
*ioat
)
95 u16 num_descs
= ioat2_ring_mask(ioat
) + 1;
96 u16 active
= ioat2_ring_active(ioat
);
98 BUG_ON(active
> num_descs
);
100 return num_descs
- active
;
103 /* assumes caller already checked space */
104 static inline u16
ioat2_desc_alloc(struct ioat2_dma_chan
*ioat
, u16 len
)
107 return ioat
->head
- len
;
110 static inline u16
ioat2_xferlen_to_descs(struct ioat2_dma_chan
*ioat
, size_t len
)
112 u16 num_descs
= len
>> ioat
->xfercap_log
;
114 num_descs
+= !!(len
& ((1 << ioat
->xfercap_log
) - 1));
119 * struct ioat_ring_ent - wrapper around hardware descriptor
120 * @hw: hardware DMA descriptor (for memcpy)
121 * @fill: hardware fill descriptor
122 * @xor: hardware xor descriptor
123 * @xor_ex: hardware xor extension descriptor
124 * @pq: hardware pq descriptor
125 * @pq_ex: hardware pq extension descriptor
126 * @pqu: hardware pq update descriptor
127 * @raw: hardware raw (un-typed) descriptor
128 * @txd: the generic software descriptor for all engines
129 * @len: total transaction length for unmap
130 * @result: asynchronous result of validate operations
131 * @id: identifier for debug
134 struct ioat_ring_ent
{
136 struct ioat_dma_descriptor
*hw
;
137 struct ioat_fill_descriptor
*fill
;
138 struct ioat_xor_descriptor
*xor;
139 struct ioat_xor_ext_descriptor
*xor_ex
;
140 struct ioat_pq_descriptor
*pq
;
141 struct ioat_pq_ext_descriptor
*pq_ex
;
142 struct ioat_pq_update_descriptor
*pqu
;
143 struct ioat_raw_descriptor
*raw
;
146 struct dma_async_tx_descriptor txd
;
147 enum sum_check_flags
*result
;
153 static inline struct ioat_ring_ent
*
154 ioat2_get_ring_ent(struct ioat2_dma_chan
*ioat
, u16 idx
)
156 return ioat
->ring
[idx
& ioat2_ring_mask(ioat
)];
159 static inline void ioat2_set_chainaddr(struct ioat2_dma_chan
*ioat
, u64 addr
)
161 struct ioat_chan_common
*chan
= &ioat
->base
;
163 writel(addr
& 0x00000000FFFFFFFF,
164 chan
->reg_base
+ IOAT2_CHAINADDR_OFFSET_LOW
);
166 chan
->reg_base
+ IOAT2_CHAINADDR_OFFSET_HIGH
);
169 int __devinit
ioat2_dma_probe(struct ioatdma_device
*dev
, int dca
);
170 int __devinit
ioat3_dma_probe(struct ioatdma_device
*dev
, int dca
);
171 struct dca_provider
* __devinit
ioat2_dca_init(struct pci_dev
*pdev
, void __iomem
*iobase
);
172 struct dca_provider
* __devinit
ioat3_dca_init(struct pci_dev
*pdev
, void __iomem
*iobase
);
173 int ioat2_alloc_and_lock(u16
*idx
, struct ioat2_dma_chan
*ioat
, int num_descs
);
174 int ioat2_enumerate_channels(struct ioatdma_device
*device
);
175 struct dma_async_tx_descriptor
*
176 ioat2_dma_prep_memcpy_lock(struct dma_chan
*c
, dma_addr_t dma_dest
,
177 dma_addr_t dma_src
, size_t len
, unsigned long flags
);
178 void ioat2_issue_pending(struct dma_chan
*chan
);
179 int ioat2_alloc_chan_resources(struct dma_chan
*c
);
180 void ioat2_free_chan_resources(struct dma_chan
*c
);
181 enum dma_status
ioat2_is_complete(struct dma_chan
*c
, dma_cookie_t cookie
,
182 dma_cookie_t
*done
, dma_cookie_t
*used
);
183 void __ioat2_restart_chan(struct ioat2_dma_chan
*ioat
);
184 bool reshape_ring(struct ioat2_dma_chan
*ioat
, int order
);
185 void __ioat2_issue_pending(struct ioat2_dma_chan
*ioat
);
186 void ioat2_cleanup_tasklet(unsigned long data
);
187 void ioat2_timer_event(unsigned long data
);
188 extern struct kobj_type ioat2_ktype
;
189 extern struct kmem_cache
*ioat2_cache
;
190 #endif /* IOATDMA_V2_H */