#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H

struct target_core_fabric_ops {
	/*
	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
	 * Setting this value tells target-core to enforce this limit, and
	 * report it as the INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
	 *
	 * target-core will currently reset se_cmd->data_length to this
	 * maximum size, and set the UNDERFLOW residual count if the length
	 * exceeds this limit.
	 *
	 * XXX: Not all initiator hosts honor this block-limit EVPD
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
	 */
	u32 max_data_sg_nents;
	char *(*get_fabric_name)(void);
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	/*
	 * Optionally used by fabrics to allow demo-mode login, but not
	 * expose any TPG LUNs, and return 'not connected' in standard
	 * INQUIRY response.
	 */
	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	/*
	 * Optionally used as a configfs tunable to determine when
	 * target-core should signal the PROTECT=1 feature bit for
	 * backends that don't support T10-PI, so that either fabric
	 * HW offload or target-core emulation performs the associated
	 * WRITE_STRIP and READ_INSERT operations.
	 */
	int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional to release struct se_cmd and fabric-dependent allocated
	 * I/O descriptor in transport_cmd_check_stop().
	 *
	 * Returning 1 signals that the descriptor has been released.
	 * Returning 0 signals that the descriptor has not been released.
	 */
	int (*check_stop_free)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	/*
	 * Called with struct se_portal_group->session_lock held
	 * (via spin_lock_bh()).
	 */
	int (*shutdown_session)(struct se_session *);
	void (*close_session)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
				unsigned char *, u32);
	int (*write_pending)(struct se_cmd *);
	int (*write_pending_status)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	int (*get_cmd_state)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	void (*queue_tm_rsp)(struct se_cmd *);
	void (*aborted_task)(struct se_cmd *);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
				struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
				struct config_group *, const char *);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
				struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
				struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
				struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
	void (*fabric_cleanup_nodeacl)(struct se_node_acl *);

	struct configfs_attribute **tfc_discovery_attrs;
	struct configfs_attribute **tfc_wwn_attrs;
	struct configfs_attribute **tfc_tpg_base_attrs;
	struct configfs_attribute **tfc_tpg_np_base_attrs;
	struct configfs_attribute **tfc_tpg_attrib_attrs;
	struct configfs_attribute **tfc_tpg_auth_attrs;
	struct configfs_attribute **tfc_tpg_param_attrs;
	struct configfs_attribute **tfc_tpg_nacl_base_attrs;
	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
	struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};

int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);
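
/*
 * Example usage (illustrative sketch only; the myfabric_* callbacks and
 * names below are assumptions, not part of this API): a fabric module
 * normally fills out a static instance of the ops template above and
 * registers it from module_init(), unregistering it from module_exit().
 *
 *	static const struct target_core_fabric_ops myfabric_ops = {
 *		.get_fabric_name	= myfabric_get_fabric_name,
 *		.tpg_get_wwn		= myfabric_get_fabric_wwn,
 *		.tpg_get_tag		= myfabric_get_tag,
 *		.release_cmd		= myfabric_release_cmd,
 *		.queue_data_in		= myfabric_queue_data_in,
 *		.queue_status		= myfabric_queue_status,
 *		.queue_tm_rsp		= myfabric_queue_tm_rsp,
 *		.aborted_task		= myfabric_aborted_task,
 *		.fabric_make_wwn	= myfabric_make_wwn,
 *		.fabric_drop_wwn	= myfabric_drop_wwn,
 *		.fabric_make_tpg	= myfabric_make_tpg,
 *		.fabric_drop_tpg	= myfabric_drop_tpg,
 *	};
 *
 *	static int __init myfabric_init(void)
 *	{
 *		return target_register_template(&myfabric_ops);
 *	}
 *
 *	static void __exit myfabric_exit(void)
 *	{
 *		target_unregister_template(&myfabric_ops);
 *	}
 */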

int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
		unsigned int);
struct se_session *transport_init_session_tags(unsigned int, unsigned int,
		enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void target_get_session(struct se_session *);
void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
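
/*
 * Example usage (illustrative sketch; se_tpg, initiator_name and
 * myfabric_nexus are assumed fabric-side variables): establishing and
 * tearing down an I_T nexus.  The fabric allocates a session, resolves the
 * initiator's node ACL (which may be created on the fly in demo mode), and
 * registers the session against its portal group.
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = transport_init_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 *	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
 *							initiator_name);
 *	if (!se_sess->se_node_acl) {
 *		transport_free_session(se_sess);
 *		return -EACCES;
 *	}
 *	transport_register_session(se_tpg, se_sess->se_node_acl, se_sess,
 *				   myfabric_nexus);
 *
 *	...
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */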

void transport_init_se_cmd(struct se_cmd *,
		const struct target_core_fabric_ops *,
		struct se_session *, u32, int, int, unsigned char *);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
		unsigned char *, unsigned char *, u64, u32, int, int, int,
		struct scatterlist *, u32, struct scatterlist *, u32,
		struct scatterlist *, u32);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
		unsigned char *, u64, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t, unsigned int, int);
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
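
/*
 * Example usage (illustrative sketch; cmd, cdb, unpacked_lun and
 * data_length are assumed fabric-side variables): dispatching a received
 * SCSI CDB into target-core for a READ-type command.  The final arguments
 * are the SAM task attribute, the data direction as seen by the target,
 * and target_sc_flags_table flags.
 *
 *	rc = target_submit_cmd(&cmd->se_cmd, se_sess, cdb, cmd->sense_buf,
 *			       unpacked_lun, data_length, TCM_SIMPLE_TAG,
 *			       DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		myfabric_free_cmd(cmd);
 */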

void target_execute_cmd(struct se_cmd *cmd);

int transport_generic_free_cmd(struct se_cmd *, int);

bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
		sense_reason_t, int);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);
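
/*
 * Example usage (illustrative sketch): the usual pattern for draining a
 * session's outstanding commands before releasing the nexus, e.g. from a
 * fabric's close_session() or shutdown path.
 *
 *	target_sess_cmd_list_set_waiting(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	transport_deregister_session(se_sess);
 */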

int core_alua_check_nonop_delay(struct se_cmd *);

int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);

struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
		unsigned char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
		unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
		unsigned char *, u32, int);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
		struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);
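
/*
 * Example usage (illustrative sketch; struct myfabric_tpg and the FCP
 * protocol identifier are assumptions): a fabric's ->fabric_make_tpg()
 * callback typically embeds a struct se_portal_group inside its own TPG
 * structure and registers it, passing the fabric's SCSI protocol
 * identifier (see scsi/scsi_proto.h) as the final argument.
 *
 *	struct myfabric_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *	int ret;
 *
 *	if (!tpg)
 *		return ERR_PTR(-ENOMEM);
 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
 *	if (ret < 0) {
 *		kfree(tpg);
 *		return ERR_PTR(ret);
 *	}
 *	return &tpg->se_tpg;
 */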

/*
 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
 * to the target (e.g. handling a WRITE) and DMA_FROM_DEVICE to mean
 * that data is coming from the target (e.g. handling a READ).  However,
 * this is just the opposite of what we have to tell the DMA mapping
 * layer -- e.g. when handling a READ, the HBA will have to DMA the data
 * out of memory so it can send it to the initiator, which means we
 * need to use DMA_TO_DEVICE when we map the data.
 */
static inline enum dma_data_direction
target_reverse_dma_direction(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_BIDI)
		return DMA_BIDIRECTIONAL;

	switch (se_cmd->data_direction) {
	case DMA_TO_DEVICE:
		return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:
		return DMA_TO_DEVICE;
	case DMA_NONE:
	default:
		return DMA_NONE;
	}
}
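
/*
 * Example usage (illustrative sketch; hba_dev is an assumed fabric HBA
 * struct device): a fabric that DMA-maps the command's data scatterlist
 * itself should pass the reversed direction to the DMA API rather than
 * se_cmd->data_direction.
 *
 *	count = dma_map_sg(hba_dev, se_cmd->t_data_sg, se_cmd->t_data_nents,
 *			   target_reverse_dma_direction(se_cmd));
 */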

#endif /* TARGET_CORE_FABRIC_H */