4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 #if defined(_KERNEL) && defined(HAVE_QAT)
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/pagemap.h>
26 #include <linux/completion.h>
27 #include <sys/zfs_context.h>
28 #include <sys/byteorder.h>
/*
 * Max instances in a QAT device, each instance is a channel to submit
 * jobs to QAT hardware, this is only for pre-allocating instance and
 * session arrays; the actual number of instances are defined in the
 * QAT driver's configuration file.
 */
#define	QAT_DC_MAX_INSTANCES	48

/*
 * ZLIB head and foot size
 */
#define	ZLIB_HEAD_SZ	2
#define	ZLIB_FOOT_SZ	4
46 static CpaInstanceHandle dc_inst_handles
[QAT_DC_MAX_INSTANCES
];
47 static CpaDcSessionHandle session_handles
[QAT_DC_MAX_INSTANCES
];
48 static CpaBufferList
**buffer_array
[QAT_DC_MAX_INSTANCES
];
49 static Cpa16U num_inst
= 0;
50 static Cpa32U inst_num
= 0;
51 static boolean_t qat_dc_init_done
= B_FALSE
;
52 int zfs_qat_compress_disable
= 0;
55 qat_dc_use_accel(size_t s_len
)
57 return (!zfs_qat_compress_disable
&&
59 s_len
>= QAT_MIN_BUF_SIZE
&&
60 s_len
<= QAT_MAX_BUF_SIZE
);
64 qat_dc_callback(void *p_callback
, CpaStatus status
)
66 if (p_callback
!= NULL
)
67 complete((struct completion
*)p_callback
);
74 Cpa16U num_inter_buff_lists
= 0;
76 for (Cpa16U i
= 0; i
< num_inst
; i
++) {
77 cpaDcStopInstance(dc_inst_handles
[i
]);
78 QAT_PHYS_CONTIG_FREE(session_handles
[i
]);
79 /* free intermediate buffers */
80 if (buffer_array
[i
] != NULL
) {
81 cpaDcGetNumIntermediateBuffers(
82 dc_inst_handles
[i
], &num_inter_buff_lists
);
83 for (buff_num
= 0; buff_num
< num_inter_buff_lists
;
85 CpaBufferList
*buffer_inter
=
86 buffer_array
[i
][buff_num
];
87 if (buffer_inter
->pBuffers
) {
89 buffer_inter
->pBuffers
->pData
);
91 buffer_inter
->pBuffers
);
94 buffer_inter
->pPrivateMetaData
);
95 QAT_PHYS_CONTIG_FREE(buffer_inter
);
101 qat_dc_init_done
= B_FALSE
;
107 CpaStatus status
= CPA_STATUS_SUCCESS
;
108 Cpa32U sess_size
= 0;
110 Cpa16U num_inter_buff_lists
= 0;
112 Cpa32U buff_meta_size
= 0;
113 CpaDcSessionSetupData sd
= {0};
115 if (qat_dc_init_done
)
118 status
= cpaDcGetNumInstances(&num_inst
);
119 if (status
!= CPA_STATUS_SUCCESS
)
122 /* if the user has configured no QAT compression units just return */
126 if (num_inst
> QAT_DC_MAX_INSTANCES
)
127 num_inst
= QAT_DC_MAX_INSTANCES
;
129 status
= cpaDcGetInstances(num_inst
, &dc_inst_handles
[0]);
130 if (status
!= CPA_STATUS_SUCCESS
)
133 for (Cpa16U i
= 0; i
< num_inst
; i
++) {
134 cpaDcSetAddressTranslation(dc_inst_handles
[i
],
135 (void*)virt_to_phys
);
137 status
= cpaDcBufferListGetMetaSize(dc_inst_handles
[i
],
140 if (status
== CPA_STATUS_SUCCESS
)
141 status
= cpaDcGetNumIntermediateBuffers(
142 dc_inst_handles
[i
], &num_inter_buff_lists
);
144 if (status
== CPA_STATUS_SUCCESS
&& num_inter_buff_lists
!= 0)
145 status
= QAT_PHYS_CONTIG_ALLOC(&buffer_array
[i
],
146 num_inter_buff_lists
*
147 sizeof (CpaBufferList
*));
149 for (buff_num
= 0; buff_num
< num_inter_buff_lists
;
151 if (status
== CPA_STATUS_SUCCESS
)
152 status
= QAT_PHYS_CONTIG_ALLOC(
153 &buffer_array
[i
][buff_num
],
154 sizeof (CpaBufferList
));
156 if (status
== CPA_STATUS_SUCCESS
)
157 status
= QAT_PHYS_CONTIG_ALLOC(
158 &buffer_array
[i
][buff_num
]->
162 if (status
== CPA_STATUS_SUCCESS
)
163 status
= QAT_PHYS_CONTIG_ALLOC(
164 &buffer_array
[i
][buff_num
]->pBuffers
,
165 sizeof (CpaFlatBuffer
));
167 if (status
== CPA_STATUS_SUCCESS
) {
169 * implementation requires an intermediate
170 * buffer approximately twice the size of
171 * output buffer, which is 2x max buffer
174 status
= QAT_PHYS_CONTIG_ALLOC(
175 &buffer_array
[i
][buff_num
]->pBuffers
->
176 pData
, 2 * QAT_MAX_BUF_SIZE
);
177 if (status
!= CPA_STATUS_SUCCESS
)
180 buffer_array
[i
][buff_num
]->numBuffers
= 1;
181 buffer_array
[i
][buff_num
]->pBuffers
->
182 dataLenInBytes
= 2 * QAT_MAX_BUF_SIZE
;
186 status
= cpaDcStartInstance(dc_inst_handles
[i
],
187 num_inter_buff_lists
, buffer_array
[i
]);
188 if (status
!= CPA_STATUS_SUCCESS
)
191 sd
.compLevel
= CPA_DC_L1
;
192 sd
.compType
= CPA_DC_DEFLATE
;
193 sd
.huffType
= CPA_DC_HT_FULL_DYNAMIC
;
194 sd
.sessDirection
= CPA_DC_DIR_COMBINED
;
195 sd
.sessState
= CPA_DC_STATELESS
;
196 sd
.deflateWindowSize
= 7;
197 sd
.checksum
= CPA_DC_ADLER32
;
198 status
= cpaDcGetSessionSize(dc_inst_handles
[i
],
199 &sd
, &sess_size
, &ctx_size
);
200 if (status
!= CPA_STATUS_SUCCESS
)
203 QAT_PHYS_CONTIG_ALLOC(&session_handles
[i
], sess_size
);
204 if (session_handles
[i
] == NULL
)
207 status
= cpaDcInitSession(dc_inst_handles
[i
],
209 &sd
, NULL
, qat_dc_callback
);
210 if (status
!= CPA_STATUS_SUCCESS
)
214 qat_dc_init_done
= B_TRUE
;
224 if (!qat_dc_init_done
)
231 * The "add" parameter is an additional buffer which is passed
232 * to QAT as a scratch buffer alongside the destination buffer
233 * in case the "compressed" data ends up being larger than the
234 * original source data. This is necessary to prevent QAT from
235 * generating buffer overflow warnings for incompressible data.
238 qat_compress_impl(qat_compress_dir_t dir
, char *src
, int src_len
,
239 char *dst
, int dst_len
, char *add
, int add_len
, size_t *c_len
)
241 CpaInstanceHandle dc_inst_handle
;
242 CpaDcSessionHandle session_handle
;
243 CpaBufferList
*buf_list_src
= NULL
;
244 CpaBufferList
*buf_list_dst
= NULL
;
245 CpaFlatBuffer
*flat_buf_src
= NULL
;
246 CpaFlatBuffer
*flat_buf_dst
= NULL
;
247 Cpa8U
*buffer_meta_src
= NULL
;
248 Cpa8U
*buffer_meta_dst
= NULL
;
249 Cpa32U buffer_meta_size
= 0;
250 CpaDcRqResults dc_results
;
251 CpaStatus status
= CPA_STATUS_FAIL
;
253 Cpa32U compressed_sz
;
254 Cpa32U num_src_buf
= (src_len
>> PAGE_SHIFT
) + 2;
255 Cpa32U num_dst_buf
= (dst_len
>> PAGE_SHIFT
) + 2;
256 Cpa32U num_add_buf
= (add_len
>> PAGE_SHIFT
) + 2;
258 Cpa32U dst_pages
= 0;
262 struct page
**in_pages
= NULL
;
263 struct page
**out_pages
= NULL
;
264 struct page
**add_pages
= NULL
;
266 struct completion complete
;
271 * We increment num_src_buf and num_dst_buf by 2 to allow
272 * us to handle non page-aligned buffer addresses and buffers
273 * whose sizes are not divisible by PAGE_SIZE.
275 Cpa32U src_buffer_list_mem_size
= sizeof (CpaBufferList
) +
276 (num_src_buf
* sizeof (CpaFlatBuffer
));
277 Cpa32U dst_buffer_list_mem_size
= sizeof (CpaBufferList
) +
278 ((num_dst_buf
+ num_add_buf
) * sizeof (CpaFlatBuffer
));
280 status
= QAT_PHYS_CONTIG_ALLOC(&in_pages
,
281 num_src_buf
* sizeof (struct page
*));
282 if (status
!= CPA_STATUS_SUCCESS
)
285 status
= QAT_PHYS_CONTIG_ALLOC(&out_pages
,
286 num_dst_buf
* sizeof (struct page
*));
287 if (status
!= CPA_STATUS_SUCCESS
)
290 status
= QAT_PHYS_CONTIG_ALLOC(&add_pages
,
291 num_add_buf
* sizeof (struct page
*));
292 if (status
!= CPA_STATUS_SUCCESS
)
295 i
= (Cpa32U
)atomic_inc_32_nv(&inst_num
) % num_inst
;
296 dc_inst_handle
= dc_inst_handles
[i
];
297 session_handle
= session_handles
[i
];
299 cpaDcBufferListGetMetaSize(dc_inst_handle
, num_src_buf
,
301 status
= QAT_PHYS_CONTIG_ALLOC(&buffer_meta_src
, buffer_meta_size
);
302 if (status
!= CPA_STATUS_SUCCESS
)
305 cpaDcBufferListGetMetaSize(dc_inst_handle
, num_dst_buf
+ num_add_buf
,
307 status
= QAT_PHYS_CONTIG_ALLOC(&buffer_meta_dst
, buffer_meta_size
);
308 if (status
!= CPA_STATUS_SUCCESS
)
311 /* build source buffer list */
312 status
= QAT_PHYS_CONTIG_ALLOC(&buf_list_src
, src_buffer_list_mem_size
);
313 if (status
!= CPA_STATUS_SUCCESS
)
316 flat_buf_src
= (CpaFlatBuffer
*)(buf_list_src
+ 1);
318 buf_list_src
->pBuffers
= flat_buf_src
; /* always point to first one */
320 /* build destination buffer list */
321 status
= QAT_PHYS_CONTIG_ALLOC(&buf_list_dst
, dst_buffer_list_mem_size
);
322 if (status
!= CPA_STATUS_SUCCESS
)
325 flat_buf_dst
= (CpaFlatBuffer
*)(buf_list_dst
+ 1);
327 buf_list_dst
->pBuffers
= flat_buf_dst
; /* always point to first one */
329 buf_list_src
->numBuffers
= 0;
330 buf_list_src
->pPrivateMetaData
= buffer_meta_src
;
331 bytes_left
= src_len
;
334 while (bytes_left
> 0) {
335 page_off
= ((long)data
& ~PAGE_MASK
);
336 page
= qat_mem_to_page(data
);
337 in_pages
[page_num
] = page
;
338 flat_buf_src
->pData
= kmap(page
) + page_off
;
339 flat_buf_src
->dataLenInBytes
=
340 min((long)PAGE_SIZE
- page_off
, (long)bytes_left
);
342 bytes_left
-= flat_buf_src
->dataLenInBytes
;
343 data
+= flat_buf_src
->dataLenInBytes
;
345 buf_list_src
->numBuffers
++;
349 buf_list_dst
->numBuffers
= 0;
350 buf_list_dst
->pPrivateMetaData
= buffer_meta_dst
;
351 bytes_left
= dst_len
;
354 while (bytes_left
> 0) {
355 page_off
= ((long)data
& ~PAGE_MASK
);
356 page
= qat_mem_to_page(data
);
357 flat_buf_dst
->pData
= kmap(page
) + page_off
;
358 out_pages
[page_num
] = page
;
359 flat_buf_dst
->dataLenInBytes
=
360 min((long)PAGE_SIZE
- page_off
, (long)bytes_left
);
362 bytes_left
-= flat_buf_dst
->dataLenInBytes
;
363 data
+= flat_buf_dst
->dataLenInBytes
;
365 buf_list_dst
->numBuffers
++;
370 /* map additional scratch pages into the destination buffer list */
371 bytes_left
= add_len
;
374 while (bytes_left
> 0) {
375 page_off
= ((long)data
& ~PAGE_MASK
);
376 page
= qat_mem_to_page(data
);
377 flat_buf_dst
->pData
= kmap(page
) + page_off
;
378 add_pages
[page_num
] = page
;
379 flat_buf_dst
->dataLenInBytes
=
380 min((long)PAGE_SIZE
- page_off
, (long)bytes_left
);
382 bytes_left
-= flat_buf_dst
->dataLenInBytes
;
383 data
+= flat_buf_dst
->dataLenInBytes
;
385 buf_list_dst
->numBuffers
++;
389 init_completion(&complete
);
391 if (dir
== QAT_COMPRESS
) {
392 QAT_STAT_BUMP(comp_requests
);
393 QAT_STAT_INCR(comp_total_in_bytes
, src_len
);
395 cpaDcGenerateHeader(session_handle
,
396 buf_list_dst
->pBuffers
, &hdr_sz
);
397 buf_list_dst
->pBuffers
->pData
+= hdr_sz
;
398 buf_list_dst
->pBuffers
->dataLenInBytes
-= hdr_sz
;
399 status
= cpaDcCompressData(
400 dc_inst_handle
, session_handle
,
401 buf_list_src
, buf_list_dst
,
402 &dc_results
, CPA_DC_FLUSH_FINAL
,
404 if (status
!= CPA_STATUS_SUCCESS
) {
408 /* we now wait until the completion of the operation. */
409 wait_for_completion(&complete
);
411 if (dc_results
.status
!= CPA_STATUS_SUCCESS
) {
412 status
= CPA_STATUS_FAIL
;
416 compressed_sz
= dc_results
.produced
;
417 if (compressed_sz
+ hdr_sz
+ ZLIB_FOOT_SZ
> dst_len
) {
418 status
= CPA_STATUS_INCOMPRESSIBLE
;
422 /* get adler32 checksum and append footer */
423 *(Cpa32U
*)(dst
+ hdr_sz
+ compressed_sz
) =
424 BSWAP_32(dc_results
.checksum
);
426 *c_len
= hdr_sz
+ compressed_sz
+ ZLIB_FOOT_SZ
;
427 QAT_STAT_INCR(comp_total_out_bytes
, *c_len
);
429 ASSERT3U(dir
, ==, QAT_DECOMPRESS
);
430 QAT_STAT_BUMP(decomp_requests
);
431 QAT_STAT_INCR(decomp_total_in_bytes
, src_len
);
433 buf_list_src
->pBuffers
->pData
+= ZLIB_HEAD_SZ
;
434 buf_list_src
->pBuffers
->dataLenInBytes
-= ZLIB_HEAD_SZ
;
435 status
= cpaDcDecompressData(dc_inst_handle
, session_handle
,
436 buf_list_src
, buf_list_dst
, &dc_results
, CPA_DC_FLUSH_FINAL
,
439 if (CPA_STATUS_SUCCESS
!= status
) {
440 status
= CPA_STATUS_FAIL
;
444 /* we now wait until the completion of the operation. */
445 wait_for_completion(&complete
);
447 if (dc_results
.status
!= CPA_STATUS_SUCCESS
) {
448 status
= CPA_STATUS_FAIL
;
452 /* verify adler checksum */
453 adler32
= *(Cpa32U
*)(src
+ dc_results
.consumed
+ ZLIB_HEAD_SZ
);
454 if (adler32
!= BSWAP_32(dc_results
.checksum
)) {
455 status
= CPA_STATUS_FAIL
;
458 *c_len
= dc_results
.produced
;
459 QAT_STAT_INCR(decomp_total_out_bytes
, *c_len
);
463 if (status
!= CPA_STATUS_SUCCESS
&& status
!= CPA_STATUS_INCOMPRESSIBLE
)
464 QAT_STAT_BUMP(dc_fails
);
468 page_num
< buf_list_src
->numBuffers
;
470 kunmap(in_pages
[page_num
]);
472 QAT_PHYS_CONTIG_FREE(in_pages
);
476 for (page_num
= 0; page_num
< dst_pages
; page_num
++) {
477 kunmap(out_pages
[page_num
]);
479 QAT_PHYS_CONTIG_FREE(out_pages
);
484 page_num
< buf_list_dst
->numBuffers
- dst_pages
;
486 kunmap(add_pages
[page_num
]);
488 QAT_PHYS_CONTIG_FREE(add_pages
);
491 QAT_PHYS_CONTIG_FREE(buffer_meta_src
);
492 QAT_PHYS_CONTIG_FREE(buffer_meta_dst
);
493 QAT_PHYS_CONTIG_FREE(buf_list_src
);
494 QAT_PHYS_CONTIG_FREE(buf_list_dst
);
500 * Entry point for QAT accelerated compression / decompression.
503 qat_compress(qat_compress_dir_t dir
, char *src
, int src_len
,
504 char *dst
, int dst_len
, size_t *c_len
)
510 if (dir
== QAT_COMPRESS
) {
512 add
= zio_data_buf_alloc(add_len
);
515 ret
= qat_compress_impl(dir
, src
, src_len
, dst
,
516 dst_len
, add
, add_len
, c_len
);
518 if (dir
== QAT_COMPRESS
)
519 zio_data_buf_free(add
, add_len
);
525 param_set_qat_compress(const char *val
, zfs_kernel_param_t
*kp
)
528 int *pvalue
= kp
->arg
;
529 ret
= param_set_int(val
, kp
);
533 * zfs_qat_compress_disable = 0: enable qat compress
534 * try to initialize qat instance if it has not been done
536 if (*pvalue
== 0 && !qat_dc_init_done
) {
539 zfs_qat_compress_disable
= 1;
546 module_param_call(zfs_qat_compress_disable
, param_set_qat_compress
,
547 param_get_int
, &zfs_qat_compress_disable
, 0644);
548 MODULE_PARM_DESC(zfs_qat_compress_disable
, "Enable/Disable QAT compression");