/*
 * CAAM/SEC 4.x functions for using scatterlists in caam driver
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */
11 * convert single dma address to h/w link table format
13 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry
*sec4_sg_ptr
,
14 dma_addr_t dma
, u32 len
, u32 offset
)
16 sec4_sg_ptr
->ptr
= dma
;
17 sec4_sg_ptr
->len
= len
;
18 sec4_sg_ptr
->reserved
= 0;
19 sec4_sg_ptr
->buf_pool_id
= 0;
20 sec4_sg_ptr
->offset
= offset
;
22 print_hex_dump(KERN_ERR
, "sec4_sg_ptr@: ",
23 DUMP_PREFIX_ADDRESS
, 16, 4, sec4_sg_ptr
,
24 sizeof(struct sec4_sg_entry
), 1);
29 * convert scatterlist to h/w link table format
30 * but does not have final bit; instead, returns last entry
32 static inline struct sec4_sg_entry
*
33 sg_to_sec4_sg(struct scatterlist
*sg
, int sg_count
,
34 struct sec4_sg_entry
*sec4_sg_ptr
, u32 offset
)
37 dma_to_sec4_sg_one(sec4_sg_ptr
, sg_dma_address(sg
),
38 sg_dma_len(sg
), offset
);
40 sg
= scatterwalk_sg_next(sg
);
43 return sec4_sg_ptr
- 1;
47 * convert scatterlist to h/w link table format
48 * scatterlist must have been previously dma mapped
50 static inline void sg_to_sec4_sg_last(struct scatterlist
*sg
, int sg_count
,
51 struct sec4_sg_entry
*sec4_sg_ptr
,
54 sec4_sg_ptr
= sg_to_sec4_sg(sg
, sg_count
, sec4_sg_ptr
, offset
);
55 sec4_sg_ptr
->len
|= SEC4_SG_LEN_FIN
;
58 /* count number of elements in scatterlist */
59 static inline int __sg_count(struct scatterlist
*sg_list
, int nbytes
,
62 struct scatterlist
*sg
= sg_list
;
68 if (!sg_is_last(sg
) && (sg
+ 1)->length
== 0)
70 sg
= scatterwalk_sg_next(sg
);
76 /* derive number of elements in scatterlist, but return 0 for 1 */
77 static inline int sg_count(struct scatterlist
*sg_list
, int nbytes
,
80 int sg_nents
= __sg_count(sg_list
, nbytes
, chained
);
82 if (likely(sg_nents
== 1))
88 static int dma_map_sg_chained(struct device
*dev
, struct scatterlist
*sg
,
89 unsigned int nents
, enum dma_data_direction dir
,
92 if (unlikely(chained
)) {
94 for (i
= 0; i
< nents
; i
++) {
95 dma_map_sg(dev
, sg
, 1, dir
);
96 sg
= scatterwalk_sg_next(sg
);
99 dma_map_sg(dev
, sg
, nents
, dir
);
104 static int dma_unmap_sg_chained(struct device
*dev
, struct scatterlist
*sg
,
105 unsigned int nents
, enum dma_data_direction dir
,
108 if (unlikely(chained
)) {
110 for (i
= 0; i
< nents
; i
++) {
111 dma_unmap_sg(dev
, sg
, 1, dir
);
112 sg
= scatterwalk_sg_next(sg
);
115 dma_unmap_sg(dev
, sg
, nents
, dir
);
120 /* Map SG page in kernel virtual address space and copy */
121 static inline void sg_map_copy(u8
*dest
, struct scatterlist
*sg
,
127 * Page here can be user-space pinned using get_user_pages
128 * Same must be kmapped before use and kunmapped subsequently
130 mapped_addr
= kmap_atomic(sg_page(sg
));
131 memcpy(dest
, mapped_addr
+ offset
, len
);
132 kunmap_atomic(mapped_addr
);
135 /* Copy from len bytes of sg to dest, starting from beginning */
136 static inline void sg_copy(u8
*dest
, struct scatterlist
*sg
, unsigned int len
)
138 struct scatterlist
*current_sg
= sg
;
139 int cpy_index
= 0, next_cpy_index
= current_sg
->length
;
141 while (next_cpy_index
< len
) {
142 sg_map_copy(dest
+ cpy_index
, current_sg
, current_sg
->length
,
144 current_sg
= scatterwalk_sg_next(current_sg
);
145 cpy_index
= next_cpy_index
;
146 next_cpy_index
+= current_sg
->length
;
149 sg_map_copy(dest
+ cpy_index
, current_sg
, len
-cpy_index
,
153 /* Copy sg data, from to_skip to end, to dest */
154 static inline void sg_copy_part(u8
*dest
, struct scatterlist
*sg
,
155 int to_skip
, unsigned int end
)
157 struct scatterlist
*current_sg
= sg
;
158 int sg_index
, cpy_index
, offset
;
160 sg_index
= current_sg
->length
;
161 while (sg_index
<= to_skip
) {
162 current_sg
= scatterwalk_sg_next(current_sg
);
163 sg_index
+= current_sg
->length
;
165 cpy_index
= sg_index
- to_skip
;
166 offset
= current_sg
->offset
+ current_sg
->length
- cpy_index
;
167 sg_map_copy(dest
, current_sg
, cpy_index
, offset
);
168 if (end
- sg_index
) {
169 current_sg
= scatterwalk_sg_next(current_sg
);
170 sg_copy(dest
+ cpy_index
, current_sg
, end
- sg_index
);