/***********************license start************************************
 * Copyright (c) 2003-2017 Cavium, Inc.
 * All rights reserved.
 *
 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
 *
 * This file is provided under the terms of the Cavium License (see below)
 * or under the terms of GNU General Public License, Version 2, as
 * published by the Free Software Foundation. When using or redistributing
 * this file, you may do so under either license.
 *
 * Cavium License:  Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the following
 * conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 *  * Neither the name of Cavium Inc. nor the names of its contributors may be
 *    used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
 * WITH YOU.
 ***********************license end**************************************/
#include "zip_crypto.h"

#define DRV_NAME	"ThunderX-ZIP"
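/* Per-node ZIP device table; zip_get_device() indexes it by NUMA node id. */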
static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
static const struct pci_device_id zip_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
	{ 0, },
};
void zip_reg_write(u64 val, u64 __iomem *addr)
{
	writeq(val, addr);
}

u64 zip_reg_read(u64 __iomem *addr)
{
	return readq(addr);
}
/*
 * Allocates new ZIP device structure
 * Returns zip_device pointer or NULL if cannot allocate memory for zip_device
 */
static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
{
	struct zip_device *zip = NULL;
	int idx;

	for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
		if (!zip_dev[idx])
			break;
	}

	/* To ensure that the index is within the limit */
	if (idx < MAX_ZIP_DEVICES)
		zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);

	if (!zip)
		return NULL;

	zip_dev[idx] = zip;
	zip->index = idx;
	return zip;
}
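/*
 * The zip_device is devm-allocated, so its memory is released automatically
 * when the PCI device goes away; the probe error path also drops it
 * explicitly with devm_kfree() after clearing its zip_dev[] slot.
 */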
/**
 * zip_get_device - Get ZIP device based on node id of cpu
 *
 * @node: Node id of the current cpu
 * Return: Pointer to Zip device structure
 */
struct zip_device *zip_get_device(int node)
{
	if ((node < MAX_ZIP_DEVICES) && (node >= 0))
		return zip_dev[node];

	zip_err("ZIP device not found for node id %d\n", node);
	return NULL;
}
/**
 * zip_get_node_id - Get the node id of the current cpu
 *
 * Return: Node id of the current cpu
 */
int zip_get_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
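/*
 * Used together with zip_get_device() so that requests can be steered to the
 * ZIP device that is local to the requesting CPU's node.
 */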
/* Initializes the ZIP h/w sub-system */
static int zip_init_hw(struct zip_device *zip)
{
	union zip_cmd_ctl    cmd_ctl;
	union zip_constants  constants;
	union zip_que_ena    que_ena;
	union zip_quex_map   que_map;
	union zip_que_pri    que_pri;

	union zip_quex_sbuf_addr que_sbuf_addr;
	union zip_quex_sbuf_ctl  que_sbuf_ctl;

	int q = 0;
	/* Enable the ZIP Engine(Core) Clock */
	cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
	cmd_ctl.s.forceclk = 1;
	zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));

	zip_msg("ZIP_CMD_CTL : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
	constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
	zip->depth   = constants.s.depth;
	zip->onfsize = constants.s.onfsize;
	zip->ctxsize = constants.s.ctxsize;

	zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
		zip->depth, zip->onfsize, zip->ctxsize);
	/*
	 * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
	 * have the correct buffer pointer and size configured for each
	 * instruction queue.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_sbuf_ctl.u_reg64 = 0ull;
		que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
		que_sbuf_ctl.s.inst_be   = 0;
		que_sbuf_ctl.s.stream_id = 0;
		zip_reg_write(que_sbuf_ctl.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));

		zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
	}
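	/*
	 * Allocate a per-queue command buffer and hand its physical address
	 * to the hardware.
	 */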
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));

		spin_lock_init(&zip->iq[q].lock);

		if (zip_cmd_qbuf_alloc(zip, q)) {
			while (q != 0) {
				q--;
				zip_cmd_qbuf_free(zip, q);
			}
			return -ENOMEM;
		}
		/* Initialize tail ptr to head */
		zip->iq[q].sw_tail = zip->iq[q].sw_head;
		zip->iq[q].hw_tail = zip->iq[q].sw_head;
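		/* The queue starts out empty: both tails equal the head. */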
		/* Write the physical addr to register */
		que_sbuf_addr.u_reg64 = 0ull;
		que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
				       ZIP_128B_ALIGN);

		zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
			(u64)que_sbuf_addr.s.ptr);

		zip_reg_write(que_sbuf_addr.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
		zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
			zip->iq[q].sw_head, zip->iq[q].sw_tail,
			zip->iq[q].hw_tail);
		zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
	}
	/*
	 * Queue-to-ZIP core mapping
	 * If a queue is not mapped to a particular core, it is equivalent to
	 * the ZIP core being disabled.
	 */
	que_ena.u_reg64 = 0x0ull;
	/* Enabling queues based on ZIP_NUM_QUEUES */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_ena.s.ena |= (0x1 << q);
	zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));

	zip_msg("QUE_ENA : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_map.u_reg64 = 0ull;
		/* Mapping each queue to two ZIP cores */
		que_map.s.zce = 0x3;
		zip_reg_write(que_map.u_reg64,
			      (zip->reg_base + ZIP_QUEX_MAP(q)));

		zip_msg("QUE_MAP(%d) : 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
	}
	que_pri.u_reg64 = 0ull;
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
	zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));

	zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));

	return 0;
}
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct zip_device *zip = NULL;
	int err;

	zip = zip_alloc_device(pdev);
	if (!zip)
		return -ENOMEM;

	dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
		 pdev->vendor, pdev->device, dev_to_node(dev));

	pci_set_drvdata(pdev, zip);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device");
		goto err_free_device;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
		goto err_release_regions;
	}
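	/*
	 * The device is limited to 48-bit DMA addressing for both streaming
	 * and coherent mappings.
	 */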
	/* MAP configuration registers */
	zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
	if (!zip->reg_base) {
		dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Initialize ZIP Hardware */
	err = zip_init_hw(zip);
	if (err)
		goto err_release_regions;

	return 0;
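/* Error unwind: undo the successful probe steps in reverse order. */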
err_release_regions:
	if (zip->reg_base)
		iounmap(zip->reg_base);
	pci_release_regions(pdev);

err_disable_device:
	pci_disable_device(pdev);

err_free_device:
	pci_set_drvdata(pdev, NULL);

	/* Remove zip_dev from zip_device list, free the zip_device memory */
	zip_dev[zip->index] = NULL;
	devm_kfree(dev, zip);

	return err;
}
static void zip_remove(struct pci_dev *pdev)
{
	struct zip_device *zip = pci_get_drvdata(pdev);
	union zip_cmd_ctl cmd_ctl;
	int q = 0;

	if (!zip)
		return;

	if (zip->reg_base) {
		cmd_ctl.u_reg64 = 0x0ull;
		cmd_ctl.s.reset = 1;  /* Forces ZIP cores to do reset */
		zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
		iounmap(zip->reg_base);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/*
	 * Free Command Queue buffers. This free should be called for all
	 * the enabled Queues.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		zip_cmd_qbuf_free(zip, q);

	pci_set_drvdata(pdev, NULL);
	/* remove zip device from zip device list */
	zip_dev[zip->index] = NULL;
}
/* PCI Sub-System Interface */
static struct pci_driver zip_driver = {
	.name     = DRV_NAME,
	.id_table = zip_id_table,
	.probe    = zip_probe,
	.remove   = zip_remove,
};
/* Kernel Crypto Subsystem Interface */
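/*
 * Both the legacy crypto_alg compression interface and the newer scomp
 * (synchronous compression) interface are exposed below, each for the
 * deflate and lzs algorithms.
 */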
static struct crypto_alg zip_comp_deflate = {
	.cra_name        = "deflate",
	.cra_driver_name = "deflate-cavium",
	.cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize     = sizeof(struct zip_kernel_ctx),
	.cra_module      = THIS_MODULE,
	.cra_init        = zip_alloc_comp_ctx_deflate,
	.cra_exit        = zip_free_comp_ctx,
	.cra_u           = { .compress = {
		.coa_compress   = zip_comp_compress,
		.coa_decompress = zip_comp_decompress
	} }
};
static struct crypto_alg zip_comp_lzs = {
	.cra_name        = "lzs",
	.cra_driver_name = "lzs-cavium",
	.cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize     = sizeof(struct zip_kernel_ctx),
	.cra_module      = THIS_MODULE,
	.cra_init        = zip_alloc_comp_ctx_lzs,
	.cra_exit        = zip_free_comp_ctx,
	.cra_u           = { .compress = {
		.coa_compress   = zip_comp_compress,
		.coa_decompress = zip_comp_decompress
	} }
};
static struct scomp_alg zip_scomp_deflate = {
	.alloc_ctx  = zip_alloc_scomp_ctx_deflate,
	.free_ctx   = zip_free_scomp_ctx,
	.compress   = zip_scomp_compress,
	.decompress = zip_scomp_decompress,
	.base       = {
		.cra_name        = "deflate",
		.cra_driver_name = "deflate-scomp-cavium",
		.cra_module      = THIS_MODULE,
	}
};
static struct scomp_alg zip_scomp_lzs = {
	.alloc_ctx  = zip_alloc_scomp_ctx_lzs,
	.free_ctx   = zip_free_scomp_ctx,
	.compress   = zip_scomp_compress,
	.decompress = zip_scomp_decompress,
	.base       = {
		.cra_name        = "lzs",
		.cra_driver_name = "lzs-scomp-cavium",
		.cra_module      = THIS_MODULE,
	}
};
static int zip_register_compression_device(void)
{
	int ret;

	ret = crypto_register_alg(&zip_comp_deflate);
	if (ret < 0) {
		zip_err("Deflate algorithm registration failed\n");
		return ret;
	}

	ret = crypto_register_alg(&zip_comp_lzs);
	if (ret < 0) {
		zip_err("LZS algorithm registration failed\n");
		goto err_unregister_alg_deflate;
	}

	ret = crypto_register_scomp(&zip_scomp_deflate);
	if (ret < 0) {
		zip_err("Deflate scomp algorithm registration failed\n");
		goto err_unregister_alg_lzs;
	}

	ret = crypto_register_scomp(&zip_scomp_lzs);
	if (ret < 0) {
		zip_err("LZS scomp algorithm registration failed\n");
		goto err_unregister_scomp_deflate;
	}

	return ret;

err_unregister_scomp_deflate:
	crypto_unregister_scomp(&zip_scomp_deflate);
err_unregister_alg_lzs:
	crypto_unregister_alg(&zip_comp_lzs);
err_unregister_alg_deflate:
	crypto_unregister_alg(&zip_comp_deflate);

	return ret;
}
static void zip_unregister_compression_device(void)
{
	crypto_unregister_alg(&zip_comp_deflate);
	crypto_unregister_alg(&zip_comp_lzs);
	crypto_unregister_scomp(&zip_scomp_deflate);
	crypto_unregister_scomp(&zip_scomp_lzs);
}
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
/* Displays ZIP device statistics */
static int zip_show_stats(struct seq_file *s, void *unused)
{
	u64 val = 0ull;
	u64 avg_chunk = 0ull, avg_cr = 0ull;
	u32 q = 0;

	int index = 0;
	struct zip_device *zip;
	struct zip_stats  *st;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		u64 pending = 0;

		if (zip_dev[index]) {
			zip = zip_dev[index];
			st  = &zip->stats;
			/* Get all the pending requests */
			for (q = 0; q < ZIP_NUM_QUEUES; q++) {
				val = zip_reg_read((zip->reg_base +
						    ZIP_DBG_QUEX_STA(q)));
				pending += val >> 32 & 0xffffff;
			}
			val = atomic64_read(&st->comp_req_complete);
			avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

			val = atomic64_read(&st->comp_out_bytes);
			avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
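			/*
			 * avg_chunk is input bytes per completed request;
			 * avg_cr is the input/output byte ratio (integer
			 * division, so both are truncated averages).
			 */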
			seq_printf(s, " ZIP Device %d Stats\n"
				      "-----------------------------------\n"
				      "Comp Req Submitted : \t%lld\n"
				      "Comp Req Completed : \t%lld\n"
				      "Compress In Bytes : \t%lld\n"
				      "Compressed Out Bytes : \t%lld\n"
				      "Average Chunk size : \t%llu\n"
				      "Average Compression ratio : \t%llu\n"
				      "Decomp Req Submitted : \t%lld\n"
				      "Decomp Req Completed : \t%lld\n"
				      "Decompress In Bytes : \t%lld\n"
				      "Decompressed Out Bytes : \t%lld\n"
				      "Decompress Bad requests : \t%lld\n"
				      "Pending Req : \t%lld\n"
				      "---------------------------------\n",
				      index,
				      (u64)atomic64_read(&st->comp_req_submit),
				      (u64)atomic64_read(&st->comp_req_complete),
				      (u64)atomic64_read(&st->comp_in_bytes),
				      (u64)atomic64_read(&st->comp_out_bytes),
				      avg_chunk,
				      avg_cr,
				      (u64)atomic64_read(&st->decomp_req_submit),
				      (u64)atomic64_read(&st->decomp_req_complete),
				      (u64)atomic64_read(&st->decomp_in_bytes),
				      (u64)atomic64_read(&st->decomp_out_bytes),
				      (u64)atomic64_read(&st->decomp_bad_reqs),
				      pending);
		}
	}
	return 0;
}
/* Clears stats data */
static int zip_clear_stats(struct seq_file *s, void *unused)
{
	int index = 0;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		if (zip_dev[index]) {
			memset(&zip_dev[index]->stats, 0,
			       sizeof(struct zip_stats));
			seq_printf(s, "Cleared stats for zip %d\n", index);
		}
	}

	return 0;
}
static struct zip_registers zipregs[64] = {
	{"ZIP_CMD_CTL ",        0x0000ull},
	{"ZIP_THROTTLE ",       0x0010ull},
	{"ZIP_CONSTANTS ",      0x00A0ull},
	{"ZIP_QUE0_MAP ",       0x1400ull},
	{"ZIP_QUE1_MAP ",       0x1408ull},
	{"ZIP_QUE_ENA ",        0x0500ull},
	{"ZIP_QUE_PRI ",        0x0508ull},
	{"ZIP_QUE0_DONE ",      0x2000ull},
	{"ZIP_QUE1_DONE ",      0x2008ull},
	{"ZIP_QUE0_DOORBELL ",  0x4000ull},
	{"ZIP_QUE1_DOORBELL ",  0x4008ull},
	{"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
	{"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
	{"ZIP_QUE0_SBUF_CTL ",  0x1200ull},
	{"ZIP_QUE1_SBUF_CTL ",  0x1208ull},
	{ NULL, 0 }
};
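/* The table is NULL-terminated; zip_print_regs() walks it until reg_name is NULL. */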
/* Prints registers' contents */
static int zip_print_regs(struct seq_file *s, void *unused)
{
	u64 val = 0;
	int i = 0, index = 0;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		if (zip_dev[index]) {
			seq_printf(s, "--------------------------------\n"
				      " ZIP Device %d Registers\n"
				      "--------------------------------\n",
				      index);

			i = 0;

			while (zipregs[i].reg_name) {
				val = zip_reg_read((zip_dev[index]->reg_base +
						    zipregs[i].reg_offset));
				seq_printf(s, "%s: 0x%016llx\n",
					   zipregs[i].reg_name, val);
				i++;
			}
		}
	}
	return 0;
}
static int zip_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_show_stats, NULL);
}

static const struct file_operations zip_stats_fops = {
	.owner   = THIS_MODULE,
	.open    = zip_stats_open,
	.read    = seq_read,
	.release = single_release,
};

static int zip_clear_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_clear_stats, NULL);
}

static const struct file_operations zip_clear_fops = {
	.owner   = THIS_MODULE,
	.open    = zip_clear_open,
	.read    = seq_read,
	.release = single_release,
};

static int zip_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_print_regs, NULL);
}

static const struct file_operations zip_regs_fops = {
	.owner   = THIS_MODULE,
	.open    = zip_regs_open,
	.read    = seq_read,
	.release = single_release,
};
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
static int __init zip_debugfs_init(void)
{
	struct dentry *zip_stats, *zip_clear, *zip_regs;

	if (!debugfs_initialized())
		return -ENODEV;

	zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
	if (!zip_debugfs_root)
		return -ENOMEM;

	/* Creating files for entries inside thunderx_zip directory */
	zip_stats = debugfs_create_file("zip_stats", 0444,
					zip_debugfs_root,
					NULL, &zip_stats_fops);
	if (!zip_stats)
		goto failed_to_create;

	zip_clear = debugfs_create_file("zip_clear", 0444,
					zip_debugfs_root,
					NULL, &zip_clear_fops);
	if (!zip_clear)
		goto failed_to_create;

	zip_regs = debugfs_create_file("zip_regs", 0444,
				       zip_debugfs_root,
				       NULL, &zip_regs_fops);
	if (!zip_regs)
		goto failed_to_create;

	return 0;

failed_to_create:
	debugfs_remove_recursive(zip_debugfs_root);
	return -ENOENT;
}
static void __exit zip_debugfs_exit(void)
{
	debugfs_remove_recursive(zip_debugfs_root);
}

#else
static int __init zip_debugfs_init(void)
{
	return 0;
}

static void __exit zip_debugfs_exit(void) { }

#endif
static int __init zip_init_module(void)
{
	int ret;

	zip_msg("%s\n", DRV_NAME);

	ret = pci_register_driver(&zip_driver);
	if (ret < 0) {
		zip_err("ZIP: pci_register_driver() failed\n");
		return ret;
	}

	/* Register with the Kernel Crypto Interface */
	ret = zip_register_compression_device();
	if (ret < 0) {
		zip_err("ZIP: Kernel Crypto Registration failed\n");
		goto err_pci_unregister;
	}

	/* comp-decomp statistics are handled with debugfs interface */
	ret = zip_debugfs_init();
	if (ret < 0) {
		zip_err("ZIP: debugfs initialization failed\n");
		goto err_crypto_unregister;
	}

	return ret;

err_crypto_unregister:
	zip_unregister_compression_device();

err_pci_unregister:
	pci_unregister_driver(&zip_driver);

	return ret;
}
static void __exit zip_cleanup_module(void)
{
	zip_debugfs_exit();

	/* Unregister from the kernel crypto interface */
	zip_unregister_compression_device();

	/* Unregister this driver for pci zip devices */
	pci_unregister_driver(&zip_driver);
}
module_init(zip_init_module);
module_exit(zip_cleanup_module);

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, zip_id_table);