// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
13 spinlock_t lock
; /* IO pool lock */
14 u32 io_num_ios
; /* Total IOs allocated */
15 struct efct_io
*ios
[EFCT_NUM_SCSI_IOS
];
16 struct list_head freelist
;
21 efct_io_pool_create(struct efct
*efct
, u32 num_sgl
)
24 struct efct_io_pool
*io_pool
;
27 /* Allocate the IO pool */
28 io_pool
= kzalloc(sizeof(*io_pool
), GFP_KERNEL
);
33 INIT_LIST_HEAD(&io_pool
->freelist
);
34 /* initialize IO pool lock */
35 spin_lock_init(&io_pool
->lock
);
37 for (i
= 0; i
< EFCT_NUM_SCSI_IOS
; i
++) {
38 io
= kzalloc(sizeof(*io
), GFP_KERNEL
);
42 io_pool
->io_num_ios
++;
45 io
->instance_index
= i
;
47 /* Allocate a response buffer */
48 io
->rspbuf
.size
= SCSI_RSP_BUF_LENGTH
;
49 io
->rspbuf
.virt
= dma_alloc_coherent(&efct
->pci
->dev
,
51 &io
->rspbuf
.phys
, GFP_KERNEL
);
52 if (!io
->rspbuf
.virt
) {
53 efc_log_err(efct
, "dma_alloc rspbuf failed\n");
54 efct_io_pool_free(io_pool
);
59 io
->sgl
= kzalloc(sizeof(*io
->sgl
) * num_sgl
, GFP_KERNEL
);
61 efct_io_pool_free(io_pool
);
65 io
->sgl_allocated
= num_sgl
;
68 INIT_LIST_HEAD(&io
->list_entry
);
69 list_add_tail(&io
->list_entry
, &io_pool
->freelist
);
76 efct_io_pool_free(struct efct_io_pool
*io_pool
)
85 for (i
= 0; i
< io_pool
->io_num_ios
; i
++) {
91 dma_free_coherent(&efct
->pci
->dev
,
92 io
->rspbuf
.size
, io
->rspbuf
.virt
,
94 memset(&io
->rspbuf
, 0, sizeof(struct efc_dma
));
98 efct
->xport
->io_pool
= NULL
;
105 efct_io_pool_io_alloc(struct efct_io_pool
*io_pool
)
107 struct efct_io
*io
= NULL
;
109 unsigned long flags
= 0;
111 efct
= io_pool
->efct
;
113 spin_lock_irqsave(&io_pool
->lock
, flags
);
115 if (!list_empty(&io_pool
->freelist
)) {
116 io
= list_first_entry(&io_pool
->freelist
, struct efct_io
,
118 list_del_init(&io
->list_entry
);
121 spin_unlock_irqrestore(&io_pool
->lock
, flags
);
126 io
->io_type
= EFCT_IO_TYPE_MAX
;
127 io
->hio_type
= EFCT_HW_IO_MAX
;
133 io
->tgt_task_tag
= 0;
134 io
->init_task_tag
= 0;
136 io
->display_name
= "pending";
140 atomic_add_return(1, &efct
->xport
->io_active_count
);
141 atomic_add_return(1, &efct
->xport
->io_total_alloc
);
145 /* Free an object used to track an IO */
147 efct_io_pool_io_free(struct efct_io_pool
*io_pool
, struct efct_io
*io
)
150 struct efct_hw_io
*hio
= NULL
;
151 unsigned long flags
= 0;
153 efct
= io_pool
->efct
;
155 spin_lock_irqsave(&io_pool
->lock
, flags
);
159 INIT_LIST_HEAD(&io
->list_entry
);
160 list_add(&io
->list_entry
, &io_pool
->freelist
);
161 spin_unlock_irqrestore(&io_pool
->lock
, flags
);
164 efct_hw_io_free(&efct
->hw
, hio
);
166 atomic_sub_return(1, &efct
->xport
->io_active_count
);
167 atomic_add_return(1, &efct
->xport
->io_total_free
);
170 /* Find an I/O given it's node and ox_id */
172 efct_io_find_tgt_io(struct efct
*efct
, struct efct_node
*node
,
173 u16 ox_id
, u16 rx_id
)
175 struct efct_io
*io
= NULL
;
176 unsigned long flags
= 0;
179 spin_lock_irqsave(&node
->active_ios_lock
, flags
);
180 list_for_each_entry(io
, &node
->active_ios
, list_entry
) {
181 if ((io
->cmd_tgt
&& io
->init_task_tag
== ox_id
) &&
182 (rx_id
== 0xffff || io
->tgt_task_tag
== rx_id
)) {
183 if (kref_get_unless_zero(&io
->ref
))
188 spin_unlock_irqrestore(&node
->active_ios_lock
, flags
);
189 return found
? io
: NULL
;